From a6be5a5fd694820cd84a3ebc919f377d676e3aa6 Mon Sep 17 00:00:00 2001 From: Tomas Nevrlka Date: Thu, 19 Dec 2024 16:23:04 +0100 Subject: [PATCH 1/5] create scaffolding for buildah 0.3 - Buildah 0.3 will be released - Copy buildah 0.2 directory to 0.3 for easier diff comparisons between the versions --- task/buildah/0.3/README.md | 54 +++ task/buildah/0.3/buildah.yaml | 702 ++++++++++++++++++++++++++++ task/buildah/0.3/kustomization.yaml | 5 + 3 files changed, 761 insertions(+) create mode 100644 task/buildah/0.3/README.md create mode 100644 task/buildah/0.3/buildah.yaml create mode 100644 task/buildah/0.3/kustomization.yaml diff --git a/task/buildah/0.3/README.md b/task/buildah/0.3/README.md new file mode 100644 index 0000000000..6be68eb10e --- /dev/null +++ b/task/buildah/0.3/README.md @@ -0,0 +1,54 @@ +# buildah task + +Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. +In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. +When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts. +When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment. + +## Parameters +| name | description | default value | required | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | -------- | +| IMAGE | Reference of the image buildah will produce. | | true | +| DOCKERFILE | Path to the Dockerfile to build. | ./Dockerfile | false | +| CONTEXT | Path to the directory to use as context. | . | false | +| TLSVERIFY | Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry) | true | false | +| HERMETIC | Determines if build will be executed without network access. | false | false | +| PREFETCH_INPUT | In case it is not empty, the prefetched content should be made available to the build. | "" | false | +| IMAGE_EXPIRES_AFTER | Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. | "" | false | +| COMMIT_SHA | The image is built from this commit. | "" | false | +| YUM_REPOS_D_SRC | Path in the git repository in which yum repository files are stored | repos.d | false | +| YUM_REPOS_D_FETCHED | Path in source workspace where dynamically-fetched repos are present | fetched.repos.d | false | +| YUM_REPOS_D_TARGET | Target path on the container in which yum repository files should be made available | /etc/yum.repos.d | false | +| TARGET_STAGE | Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage. 
| "" | false | +| ENTITLEMENT_SECRET | Name of secret which contains the entitlement certificates | etc-pki-entitlement | false | +| ACTIVATION_KEY | Name of secret which contains subscription activation key | activation-key | false | +| ADDITIONAL_SECRET | Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET | does-not-exist | false | +| BUILD_ARGS | Array of --build-arg values ("arg=value" strings) | [] | false | +| BUILD_ARGS_FILE | Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file | "" | false | +| caTrustConfigMapName | The name of the ConfigMap to read CA bundle data from. | trusted-ca | false | +| caTrustConfigMapKey | The name of the key in the ConfigMap that contains the CA bundle data. | ca-bundle.crt | false | +| ADD_CAPABILITIES | Comma separated list of extra capabilities to add when running 'buildah build' | "" | false | +| SQUASH | Squash all new and previous layers added as a part of this build, as per --squash | false | false | +| STORAGE_DRIVER | Storage driver to configure for buildah | vfs | false | +| SKIP_UNUSED_STAGES | Whether to skip stages in Containerfile that seem unused by subsequent stages | true | false | + +## Results +| name | description | +| --------------------------- | --------------------------------------------------------------------------------- | +| IMAGE_DIGEST | Digest of the image just built | +| IMAGE_URL | Image repository and tag where the built image was pushed | +| IMAGE_REF | Image reference of the built image | +| SBOM_BLOB_URL | Reference of SBOM blob digest to enable digest-based verification from provenance | +| SBOM_JAVA_COMPONENTS_COUNT | The counting of Java components by publisher in JSON format | +| JAVA_COMMUNITY_DEPENDENCIES | The Java dependencies that came from community sources such as Maven central. | + +## Workspaces +| name | description | optional | +| ------ | ---------------------------------------------- | -------- | +| source | Workspace containing the source code to build. | false | + + +## Changes in 0.2.1 +- Added image reference to the SBOM output file. +- Re-arranged steps to push image first and then generate and push SBOM file. +- Remove SBOM file stored in the image under `/root/buildinfo/content_manifests/` diff --git a/task/buildah/0.3/buildah.yaml b/task/buildah/0.3/buildah.yaml new file mode 100644 index 0000000000..266c92c319 --- /dev/null +++ b/task/buildah/0.3/buildah.yaml @@ -0,0 +1,702 @@ +apiVersion: tekton.dev/v1 +kind: Task +metadata: + labels: + app.kubernetes.io/version: "0.2.1" + build.appstudio.redhat.com/build_type: "docker" + annotations: + tekton.dev/pipelines.minVersion: "0.12.1" + tekton.dev/tags: "image-build, konflux" + name: buildah +spec: + description: |- + Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. + In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. + When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts. + When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment. + params: + - description: Reference of the image buildah will produce. 
+ name: IMAGE + type: string + - default: ./Dockerfile + description: Path to the Dockerfile to build. + name: DOCKERFILE + type: string + - default: . + description: Path to the directory to use as context. + name: CONTEXT + type: string + - default: "true" + description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry) + name: TLSVERIFY + type: string + - default: "false" + description: Determines if build will be executed without network access. + name: HERMETIC + type: string + - default: "" + description: In case it is not empty, the prefetched content should be made available to the build. + name: PREFETCH_INPUT + type: string + - default: "" + description: Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. + name: IMAGE_EXPIRES_AFTER + type: string + - name: COMMIT_SHA + description: The image is built from this commit. + type: string + default: "" + - name: YUM_REPOS_D_SRC + description: Path in the git repository in which yum repository files are stored + default: repos.d + - name: YUM_REPOS_D_FETCHED + description: Path in source workspace where dynamically-fetched repos are present + default: fetched.repos.d + - name: YUM_REPOS_D_TARGET + description: Target path on the container in which yum repository files should be made available + default: /etc/yum.repos.d + - name: TARGET_STAGE + description: Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage. + type: string + default: "" + - name: ENTITLEMENT_SECRET + description: Name of secret which contains the entitlement certificates + type: string + default: "etc-pki-entitlement" + - name: ACTIVATION_KEY + default: activation-key + description: Name of secret which contains subscription activation key + type: string + - name: ADDITIONAL_SECRET + description: Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET + type: string + default: "does-not-exist" + - name: BUILD_ARGS + description: Array of --build-arg values ("arg=value" strings) + type: array + default: [] + - name: BUILD_ARGS_FILE + description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file + type: string + default: "" + - name: caTrustConfigMapName + type: string + description: The name of the ConfigMap to read CA bundle data from. + default: trusted-ca + - name: caTrustConfigMapKey + type: string + description: The name of the key in the ConfigMap that contains the CA bundle data. 
+ default: ca-bundle.crt + - name: ADD_CAPABILITIES + description: Comma separated list of extra capabilities to add when running 'buildah build' + type: string + default: "" + - name: SQUASH + description: Squash all new and previous layers added as a part of this build, as per --squash + type: string + default: "false" + - name: STORAGE_DRIVER + description: Storage driver to configure for buildah + type: string + default: vfs + - name: SKIP_UNUSED_STAGES + description: Whether to skip stages in Containerfile that seem unused by subsequent stages + type: string + default: "true" + - name: LABELS + description: Additional key=value labels that should be applied to the image + type: array + default: [] + + results: + - description: Digest of the image just built + name: IMAGE_DIGEST + - description: Image repository and tag where the built image was pushed + name: IMAGE_URL + - description: Image reference of the built image + name: IMAGE_REF + - name: SBOM_BLOB_URL + description: Reference of SBOM blob digest to enable digest-based verification from provenance + type: string + - name: SBOM_JAVA_COMPONENTS_COUNT + description: The counting of Java components by publisher in JSON format + type: string + - name: JAVA_COMMUNITY_DEPENDENCIES + description: The Java dependencies that came from community sources such as Maven central. + stepTemplate: + computeResources: + limits: + memory: 4Gi + cpu: '4' + requests: + memory: 1Gi + cpu: '1' + volumeMounts: + - mountPath: /shared + name: shared + env: + - name: BUILDAH_FORMAT + value: oci + - name: STORAGE_DRIVER + value: $(params.STORAGE_DRIVER) + - name: HERMETIC + value: $(params.HERMETIC) + - name: SOURCE_CODE_DIR + value: source + - name: CONTEXT + value: $(params.CONTEXT) + - name: IMAGE + value: $(params.IMAGE) + - name: TLSVERIFY + value: $(params.TLSVERIFY) + - name: IMAGE_EXPIRES_AFTER + value: $(params.IMAGE_EXPIRES_AFTER) + - name: YUM_REPOS_D_SRC + value: $(params.YUM_REPOS_D_SRC) + - name: YUM_REPOS_D_FETCHED + value: $(params.YUM_REPOS_D_FETCHED) + - name: YUM_REPOS_D_TARGET + value: $(params.YUM_REPOS_D_TARGET) + - name: TARGET_STAGE + value: $(params.TARGET_STAGE) + - name: ENTITLEMENT_SECRET + value: $(params.ENTITLEMENT_SECRET) + - name: ACTIVATION_KEY + value: $(params.ACTIVATION_KEY) + - name: ADDITIONAL_SECRET + value: $(params.ADDITIONAL_SECRET) + - name: BUILD_ARGS_FILE + value: $(params.BUILD_ARGS_FILE) + - name: ADD_CAPABILITIES + value: $(params.ADD_CAPABILITIES) + - name: SQUASH + value: $(params.SQUASH) + - name: SKIP_UNUSED_STAGES + value: $(params.SKIP_UNUSED_STAGES) + + steps: + - image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + name: build + computeResources: + limits: + memory: 8Gi + cpu: '4' + requests: + memory: 2Gi + cpu: '1' + env: + - name: COMMIT_SHA + value: $(params.COMMIT_SHA) + - name: DOCKERFILE + value: $(params.DOCKERFILE) + args: + - --build-args + - $(params.BUILD_ARGS[*]) + - --labels + - $(params.LABELS[*]) + + script: | + #!/bin/bash + set -euo pipefail + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" + elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE" + elif [ -e "$DOCKERFILE" ]; then + # 
Instrumented builds (SAST) use this custom dockerffile step as their base + dockerfile_path="$DOCKERFILE" + elif echo "$DOCKERFILE" | grep -q "^https\?://"; then + echo "Fetch Dockerfile from $DOCKERFILE" + dockerfile_path=$(mktemp --suffix=-Dockerfile) + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE") + if [ $http_code != 200 ]; then + echo "No Dockerfile is fetched. Server responds $http_code" + exit 1 + fi + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore") + if [ $http_code = 200 ]; then + echo "Fetched .dockerignore from $DOCKERFILE.dockerignore" + mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore + fi + else + echo "Cannot find Dockerfile $DOCKERFILE" + exit 1 + fi + + dockerfile_copy=$(mktemp --tmpdir "$(basename "$dockerfile_path").XXXXXX") + cp "$dockerfile_path" "$dockerfile_copy" + + if [ -n "${JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR-}" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_copy"; then + sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.defaulthttp://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_copy" + touch /var/lib/containers/java + fi + + # Fixing group permission on /var/lib/containers + chown root:root /var/lib/containers + + sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf + + # Setting new namespace to run buildah - 2^32-2 + echo 'root:1:4294967294' | tee -a /etc/subuid >> /etc/subgid + + build_args=() + if [ -n "${BUILD_ARGS_FILE}" ]; then + # Parse BUILD_ARGS_FILE ourselves because dockerfile-json doesn't support it + echo "Parsing ARGs from $BUILD_ARGS_FILE" + mapfile -t build_args < <( + # https://www.mankier.com/1/buildah-build#--build-arg-file + # delete lines that start with # + # delete blank lines + sed -e '/^#/d' -e '/^\s*$/d' "${SOURCE_CODE_DIR}/${BUILD_ARGS_FILE}" + ) + fi + + LABELS=() + # Split `args` into two sets of arguments. + while [[ $# -gt 0 ]]; do + case $1 in + --build-args) + shift + # Note: this may result in multiple --build-arg=KEY=value flags with the same KEY being + # passed to buildah. In that case, the *last* occurrence takes precedence. 
This is why + # we append BUILD_ARGS after the content of the BUILD_ARGS_FILE + while [[ $# -gt 0 && $1 != --* ]]; do build_args+=("$1"); shift; done + ;; + --labels) + shift + while [[ $# -gt 0 && $1 != --* ]]; do LABELS+=("--label" "$1"); shift; done + ;; + *) + echo "unexpected argument: $1" >&2 + exit 2 + ;; + esac + done + + BUILD_ARG_FLAGS=() + for build_arg in "${build_args[@]}"; do + BUILD_ARG_FLAGS+=("--build-arg=$build_arg") + done + + dockerfile-json "${BUILD_ARG_FLAGS[@]}" "$dockerfile_copy" > /shared/parsed_dockerfile.json + BASE_IMAGES=$( + jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName | select(test("^oci-archive:") | not)' /shared/parsed_dockerfile.json + ) + + BUILDAH_ARGS=() + UNSHARE_ARGS=() + + if [ "${HERMETIC}" == "true" ]; then + BUILDAH_ARGS+=("--pull=never") + UNSHARE_ARGS+=("--net") + + for image in $BASE_IMAGES; do + unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image + done + echo "Build will be executed with network isolation" + fi + + if [ -n "${TARGET_STAGE}" ]; then + BUILDAH_ARGS+=("--target=${TARGET_STAGE}") + fi + + BUILDAH_ARGS+=("${BUILD_ARG_FLAGS[@]}") + + if [ -n "${ADD_CAPABILITIES}" ]; then + BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}") + fi + + if [ "${SQUASH}" == "true" ]; then + BUILDAH_ARGS+=("--squash") + fi + + if [ "${SKIP_UNUSED_STAGES}" != "true" ] ; then + BUILDAH_ARGS+=("--skip-unused-stages=false") + fi + + VOLUME_MOUNTS=() + + if [ -f "$(workspaces.source.path)/cachi2/cachi2.env" ]; then + cp -r "$(workspaces.source.path)/cachi2" /tmp/ + chmod -R go+rwX /tmp/cachi2 + VOLUME_MOUNTS+=(--volume /tmp/cachi2:/cachi2) + # Read in the whole file (https://unix.stackexchange.com/questions/533277), then + # for each RUN ... line insert the cachi2.env command *after* any options like --mount + sed -E -i \ + -e 'H;1h;$!d;x' \ + -e 's@^\s*(run((\s|\\\n)+-\S+)*(\s|\\\n)+)@\1. /cachi2/cachi2.env \&\& \\\n @igM' \ + "$dockerfile_copy" + echo "Prefetched content will be made available" + + prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo" + if [ -f "$prefetched_repo_for_my_arch" ]; then + echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED" + mkdir -p "$YUM_REPOS_D_FETCHED" + cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED" + fi + fi + + # if yum repofiles stored in git, copy them to mount point outside the source dir + if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then + mkdir -p ${YUM_REPOS_D_FETCHED} + cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED} + fi + + # if anything in the repofiles mount point (either fetched or from git), mount it + if [ -d "${YUM_REPOS_D_FETCHED}" ]; then + chmod -R go+rwX ${YUM_REPOS_D_FETCHED} + mount_point=$(realpath ${YUM_REPOS_D_FETCHED}) + VOLUME_MOUNTS+=(--volume "${mount_point}:${YUM_REPOS_D_TARGET}") + fi + + DEFAULT_LABELS=( + "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')" + "--label" "architecture=$(uname -m)" + "--label" "vcs-type=git" + ) + [ -n "$COMMIT_SHA" ] && DEFAULT_LABELS+=("--label" "vcs-ref=$COMMIT_SHA") + [ -n "$IMAGE_EXPIRES_AFTER" ] && DEFAULT_LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER") + + # Concatenate defaults and explicit labels. If a label appears twice, the last one wins. + LABELS=("${DEFAULT_LABELS[@]}" "${LABELS[@]}") + + ACTIVATION_KEY_PATH="/activation-key" + ENTITLEMENT_PATH="/entitlement" + + # 0. if hermetic=true, skip all subscription related stuff + # 1. 
do not enable activation key and entitlement at same time. If both vars are provided, prefer activation key. + # 2. Activation-keys will be used when the key 'org' exists in the activation key secret. + # 3. try to pre-register and mount files to the correct location so that users do no need to modify Dockerfiles. + # 3. If the Dockerfile contains the string "subcription-manager register", add the activation-keys volume + # to buildah but don't pre-register for backwards compatibility. Mount an empty directory on + # shared emptydir volume to "/etc/pki/entitlement" to prevent certificates from being included + + if [ "${HERMETIC}" != "true" ] && [ -e /activation-key/org ]; then + cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key + mkdir -p /shared/rhsm/etc/pki/entitlement + mkdir -p /shared/rhsm/etc/pki/consumer + + VOLUME_MOUNTS+=(-v /tmp/activation-key:/activation-key \ + -v /shared/rhsm/etc/pki/entitlement:/etc/pki/entitlement:Z \ + -v /shared/rhsm/etc/pki/consumer:/etc/pki/consumer:Z) + echo "Adding activation key to the build" + + if ! grep -E "^[^#]*subscription-manager.[^#]*register" "$dockerfile_path"; then + # user is not running registration in the Containerfile: pre-register. + echo "Pre-registering with subscription manager." + subscription-manager register --org "$(cat /tmp/activation-key/org)" --activationkey "$(cat /tmp/activation-key/activationkey)" + trap 'subscription-manager unregister || true' EXIT + + # copy generated certificates to /shared volume + cp /etc/pki/entitlement/*.pem /shared/rhsm/etc/pki/entitlement + cp /etc/pki/consumer/*.pem /shared/rhsm/etc/pki/consumer + + # and then mount get /etc/rhsm/ca/redhat-uep.pem into /run/secrets/rhsm/ca + VOLUME_MOUNTS+=(--volume /etc/rhsm/ca/redhat-uep.pem:/etc/rhsm/ca/redhat-uep.pem:Z) + fi + + elif [ "${HERMETIC}" != "true" ] && find /entitlement -name "*.pem" >> null; then + cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement + VOLUME_MOUNTS+=(--volume /tmp/entitlement:/etc/pki/entitlement) + echo "Adding the entitlement to the build" + fi + + if [ -n "${ADDITIONAL_VOLUME_MOUNTS-}" ]; then + # ADDITIONAL_VOLUME_MOUNTS allows to specify more volumes for the build. + # Instrumented builds (SAST) use this step as their base and add some other tools. + while read -r volume_mount; do + VOLUME_MOUNTS+=("--volume=$volume_mount") + done <<< "$ADDITIONAL_VOLUME_MOUNTS" + fi + + ADDITIONAL_SECRET_PATH="/additional-secret" + ADDITIONAL_SECRET_TMP="/tmp/additional-secret" + if [ -d "$ADDITIONAL_SECRET_PATH" ]; then + cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP + while read -r filename; do + echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}" + BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}") + done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;) + fi + + # Prevent ShellCheck from giving a warning because 'image' is defined and 'IMAGE' is not. + declare IMAGE + + buildah_cmd_array=( + buildah build + "${VOLUME_MOUNTS[@]}" + "${BUILDAH_ARGS[@]}" + "${LABELS[@]}" + --tls-verify="$TLSVERIFY" --no-cache + --ulimit nofile=4096:4096 + -f "$dockerfile_copy" -t "$IMAGE" . + ) + buildah_cmd=$(printf "%q " "${buildah_cmd_array[@]}") + + if [ "${HERMETIC}" == "true" ]; then + # enabling loopback adapter enables Bazel builds to work in hermetic mode. 
+ command="ip link set lo up && $buildah_cmd" + else + command="$buildah_cmd" + fi + + # disable host subcription manager integration + find /usr/share/rhel/secrets -type l -exec unlink {} \; + + unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- sh -c "$command" + + container=$(buildah from --pull-never "$IMAGE") + + # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later + if [ -f "/tmp/cachi2/output/bom.json" ]; then + echo "Making copy of sbom-cachi2.json" + cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json + fi + + buildah mount $container | tee /shared/container_path + # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners + find $(cat /shared/container_path) -xtype l -delete + echo $container > /shared/container_name + + touch /shared/base_images_digests + for image in $BASE_IMAGES; do + base_image_digest=$(buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image") + # In some cases, there might be BASE_IMAGES, but not any associated digest. This happens + # if buildah did not use that particular image during build because it was skipped + if [ -n "$base_image_digest" ]; then + echo "$image $base_image_digest" >> /shared/base_images_digests + fi + done + + securityContext: + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: "/entitlement" + name: etc-pki-entitlement + - mountPath: /activation-key + name: activation-key + - mountPath: "/additional-secret" + name: additional-secret + - name: trusted-ca + mountPath: /mnt/trusted-ca + readOnly: true + workingDir: $(workspaces.source.path) + - name: icm + image: quay.io/konflux-ci/icm-injection-scripts:latest@sha256:462980e94ba689b5f56c3d5dfb3358cd8c685300daf65a71532f11898935e7f1 + securityContext: + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + workingDir: $(workspaces.source.path) + args: [$(params.IMAGE)] + - name: push + image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + script: | + #!/bin/bash + set -e + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + retries=5 + # Push to a unique tag based on the TaskRun name to avoid race conditions + echo "Pushing to ${IMAGE%:*}:${TASKRUN_NAME}" + if ! buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + "$IMAGE" \ + "docker://${IMAGE%:*}:$(context.taskRun.name)"; + then + echo "Failed to push sbom image to ${IMAGE%:*}:$(context.taskRun.name) after ${retries} tries" + exit 1 + fi + + # Push to a tag based on the git revision + echo "Pushing to ${IMAGE}" + if ! 
buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + --digestfile "$(workspaces.source.path)/image-digest" "$IMAGE" \ + "docker://$IMAGE"; + then + echo "Failed to push sbom image to $IMAGE after ${retries} tries" + exit 1 + fi + + cat "$(workspaces.source.path)"/image-digest | tee $(results.IMAGE_DIGEST.path) + echo -n "$IMAGE" | tee $(results.IMAGE_URL.path) + { + echo -n "${IMAGE}@" + cat "$(workspaces.source.path)/image-digest" + } > "$(results.IMAGE_REF.path)" + + securityContext: + runAsUser: 0 + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - name: trusted-ca + mountPath: /mnt/trusted-ca + readOnly: true + workingDir: $(workspaces.source.path) + + - name: sbom-syft-generate + image: registry.access.redhat.com/rh-syft-tech-preview/syft-rhel9:1.4.1@sha256:34d7065427085a31dc4949bd283c001b91794d427e1e4cdf1b21ea4faf9fee3f + # Respect Syft configuration if the user has it in the root of their repository + # (need to set the workdir, see https://github.com/anchore/syft/issues/2465) + workingDir: $(workspaces.source.path)/source + script: | + echo "Running syft on the source directory" + syft dir:"$(workspaces.source.path)/$SOURCE_CODE_DIR/$CONTEXT" --output cyclonedx-json="$(workspaces.source.path)/sbom-source.json" + echo "Running syft on the image filesystem" + syft dir:"$(cat /shared/container_path)" --output cyclonedx-json="$(workspaces.source.path)/sbom-image.json" + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /shared + name: shared + - name: analyse-dependencies-java-sbom + image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77 + computeResources: + limits: + memory: 512Mi + cpu: 200m + requests: + memory: 256Mi + cpu: 100m + script: | + if [ -f /var/lib/containers/java ]; then + /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s $(workspaces.source.path)/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path) + sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875 + else + touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path) + fi + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /shared + name: shared + securityContext: + runAsUser: 0 + + - name: prepare-sboms + image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:9f1fd11d9c3c517ecc112d192ad361d16ecf6ce00b83b109c93cf3d1c644a357 + computeResources: + limits: + memory: 512Mi + cpu: 200m + requests: + memory: 256Mi + cpu: 100m + script: | + echo "Merging contents of sbom-source.json and sbom-image.json into sbom-cyclonedx.json" + python3 /scripts/merge_syft_sboms.py + + if [ -f "sbom-cachi2.json" ]; then + echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json" + python3 /scripts/merge_cachi2_sboms.py sbom-cachi2.json sbom-cyclonedx.json > sbom-temp.json + mv sbom-temp.json sbom-cyclonedx.json + fi + + echo "Adding base images data to sbom-cyclonedx.json" + python3 /scripts/base_images_sbom_script.py \ + --sbom=sbom-cyclonedx.json \ + --parsed-dockerfile=/shared/parsed_dockerfile.json \ + --base-images-digests=/shared/base_images_digests + + echo "Adding image reference to sbom" + IMAGE_URL="$(cat "$(results.IMAGE_URL.path)")" + IMAGE_DIGEST="$(cat "$(results.IMAGE_DIGEST.path)")" + + python3 /scripts/add_image_reference.py \ + --image-url "$IMAGE_URL" \ + 
--image-digest "$IMAGE_DIGEST" \ + --input-file sbom-cyclonedx.json \ + --output-file /tmp/sbom-cyclonedx.tmp.json + + mv /tmp/sbom-cyclonedx.tmp.json sbom-cyclonedx.json + workingDir: $(workspaces.source.path) + securityContext: + runAsUser: 0 + + - name: upload-sbom + image: quay.io/konflux-ci/appstudio-utils:48c311af02858e2422d6229600e9959e496ddef1@sha256:91ddd999271f65d8ec8487b10f3dd378f81aa894e11b9af4d10639fd52bba7e8 + script: | + #!/bin/bash + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")" + + # Remove tag from IMAGE while allowing registry to contain a port number. + sbom_repo="${IMAGE%:*}" + sbom_digest="$(sha256sum sbom-cyclonedx.json | cut -d' ' -f1)" + # The SBOM_BLOB_URL is created by `cosign attach sbom`. + echo -n "${sbom_repo}@sha256:${sbom_digest}" | tee "$(results.SBOM_BLOB_URL.path)" + + computeResources: + limits: + memory: 512Mi + cpu: 200m + requests: + memory: 256Mi + cpu: 100m + volumeMounts: + - name: trusted-ca + mountPath: /mnt/trusted-ca + readOnly: true + workingDir: $(workspaces.source.path) + + volumes: + - name: varlibcontainers + emptyDir: {} + - name: shared + emptyDir: {} + - name: etc-pki-entitlement + secret: + secretName: $(params.ENTITLEMENT_SECRET) + optional: true + - name: activation-key + secret: + optional: true + secretName: $(params.ACTIVATION_KEY) + - name: additional-secret + secret: + secretName: $(params.ADDITIONAL_SECRET) + optional: true + - name: trusted-ca + configMap: + name: $(params.caTrustConfigMapName) + items: + - key: $(params.caTrustConfigMapKey) + path: ca-bundle.crt + optional: true + workspaces: + - name: source + description: Workspace containing the source code to build. diff --git a/task/buildah/0.3/kustomization.yaml b/task/buildah/0.3/kustomization.yaml new file mode 100644 index 0000000000..6a3c230a1f --- /dev/null +++ b/task/buildah/0.3/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- buildah.yaml From 0d6d7f6d114e9d1846f9dc3922bb7ea38d77d5dc Mon Sep 17 00:00:00 2001 From: Tomas Nevrlka Date: Thu, 19 Dec 2024 16:44:19 +0100 Subject: [PATCH 2/5] remove references to jvm-build-service - jvm-build-service is not used by anyone and will get reworked - buildah task contains references to jvm-build-service - Removal of the references is a breaking change * Remove `analyse-dependencies-java-sbom` step * Remove `SBOM_JAVA_COMPONENTS_COUNT` result * Remove `JAVA_COMMUNITY_DEPENDENCIES` result - Release buildah version 0.3 --- task/buildah/0.3/MIGRATION.md | 15 +++++++ task/buildah/0.3/README.md | 82 ++++++++++++++++------------------- task/buildah/0.3/buildah.yaml | 38 +--------------- 3 files changed, 54 insertions(+), 81 deletions(-) create mode 100644 task/buildah/0.3/MIGRATION.md diff --git a/task/buildah/0.3/MIGRATION.md b/task/buildah/0.3/MIGRATION.md new file mode 100644 index 0000000000..3e2f4824cb --- /dev/null +++ b/task/buildah/0.3/MIGRATION.md @@ -0,0 +1,15 @@ +# Migration from 0.2 to 0.3 + +Version 0.3: + +Removes references to `jvm-build-service` +* Removes `analyse-dependencies-java-sbom` step +* Removes `SBOM_JAVA_COMPONENTS_COUNT` and `JAVA_COMMUNITY_DEPENDENCIES` results + +## Action from users + +The removed items are not in use, so their removal will not impact users. 
+ +A Renovate bot PR will be created with a warning icon for this task. + +This is expected, and no action from users is needed. \ No newline at end of file diff --git a/task/buildah/0.3/README.md b/task/buildah/0.3/README.md index 6be68eb10e..bbd0c90400 100644 --- a/task/buildah/0.3/README.md +++ b/task/buildah/0.3/README.md @@ -1,54 +1,46 @@ # buildah task Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. -In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. -When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts. -When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment. +In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. +When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. ## Parameters -| name | description | default value | required | -| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | -------- | -| IMAGE | Reference of the image buildah will produce. | | true | -| DOCKERFILE | Path to the Dockerfile to build. | ./Dockerfile | false | -| CONTEXT | Path to the directory to use as context. | . | false | -| TLSVERIFY | Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry) | true | false | -| HERMETIC | Determines if build will be executed without network access. | false | false | -| PREFETCH_INPUT | In case it is not empty, the prefetched content should be made available to the build. | "" | false | -| IMAGE_EXPIRES_AFTER | Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. | "" | false | -| COMMIT_SHA | The image is built from this commit. | "" | false | -| YUM_REPOS_D_SRC | Path in the git repository in which yum repository files are stored | repos.d | false | -| YUM_REPOS_D_FETCHED | Path in source workspace where dynamically-fetched repos are present | fetched.repos.d | false | -| YUM_REPOS_D_TARGET | Target path on the container in which yum repository files should be made available | /etc/yum.repos.d | false | -| TARGET_STAGE | Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage. | "" | false | -| ENTITLEMENT_SECRET | Name of secret which contains the entitlement certificates | etc-pki-entitlement | false | -| ACTIVATION_KEY | Name of secret which contains subscription activation key | activation-key | false | -| ADDITIONAL_SECRET | Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET | does-not-exist | false | -| BUILD_ARGS | Array of --build-arg values ("arg=value" strings) | [] | false | -| BUILD_ARGS_FILE | Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file | "" | false | -| caTrustConfigMapName | The name of the ConfigMap to read CA bundle data from. 
| trusted-ca | false | -| caTrustConfigMapKey | The name of the key in the ConfigMap that contains the CA bundle data. | ca-bundle.crt | false | -| ADD_CAPABILITIES | Comma separated list of extra capabilities to add when running 'buildah build' | "" | false | -| SQUASH | Squash all new and previous layers added as a part of this build, as per --squash | false | false | -| STORAGE_DRIVER | Storage driver to configure for buildah | vfs | false | -| SKIP_UNUSED_STAGES | Whether to skip stages in Containerfile that seem unused by subsequent stages | true | false | +|name|description|default value|required| +|---|---|---|---| +|IMAGE|Reference of the image buildah will produce.||true| +|DOCKERFILE|Path to the Dockerfile to build.|./Dockerfile|false| +|CONTEXT|Path to the directory to use as context.|.|false| +|TLSVERIFY|Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)|true|false| +|HERMETIC|Determines if build will be executed without network access.|false|false| +|PREFETCH_INPUT|In case it is not empty, the prefetched content should be made available to the build.|""|false| +|IMAGE_EXPIRES_AFTER|Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.|""|false| +|COMMIT_SHA|The image is built from this commit.|""|false| +|YUM_REPOS_D_SRC|Path in the git repository in which yum repository files are stored|repos.d|false| +|YUM_REPOS_D_FETCHED|Path in source workspace where dynamically-fetched repos are present|fetched.repos.d|false| +|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false| +|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false| +|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false| +|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false| +|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false| +|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false| +|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false| +|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false| +|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false| +|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false| +|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false| +|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false| +|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false| +|LABELS|Additional key=value labels that should be applied to the image|[]|false| ## Results -| name | description | -| --------------------------- | --------------------------------------------------------------------------------- | -| IMAGE_DIGEST | Digest of the image just built | -| IMAGE_URL | Image repository and tag where the built image was pushed | -| IMAGE_REF | Image reference of the built image | -| SBOM_BLOB_URL | Reference of SBOM blob digest to enable digest-based verification from provenance | 
-| SBOM_JAVA_COMPONENTS_COUNT | The counting of Java components by publisher in JSON format | -| JAVA_COMMUNITY_DEPENDENCIES | The Java dependencies that came from community sources such as Maven central. | +|name|description| +|---|---| +|IMAGE_DIGEST|Digest of the image just built| +|IMAGE_URL|Image repository and tag where the built image was pushed| +|IMAGE_REF|Image reference of the built image| +|SBOM_BLOB_URL|Reference of SBOM blob digest to enable digest-based verification from provenance| ## Workspaces -| name | description | optional | -| ------ | ---------------------------------------------- | -------- | -| source | Workspace containing the source code to build. | false | - - -## Changes in 0.2.1 -- Added image reference to the SBOM output file. -- Re-arranged steps to push image first and then generate and push SBOM file. -- Remove SBOM file stored in the image under `/root/buildinfo/content_manifests/` +|name|description|optional| +|---|---|---| +|source|Workspace containing the source code to build.|false| diff --git a/task/buildah/0.3/buildah.yaml b/task/buildah/0.3/buildah.yaml index 266c92c319..e011de82df 100644 --- a/task/buildah/0.3/buildah.yaml +++ b/task/buildah/0.3/buildah.yaml @@ -11,9 +11,8 @@ metadata: spec: description: |- Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. - In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. - When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts. - When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment. + In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. + When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. params: - description: Reference of the image buildah will produce. name: IMAGE @@ -118,11 +117,6 @@ spec: - name: SBOM_BLOB_URL description: Reference of SBOM blob digest to enable digest-based verification from provenance type: string - - name: SBOM_JAVA_COMPONENTS_COUNT - description: The counting of Java components by publisher in JSON format - type: string - - name: JAVA_COMMUNITY_DEPENDENCIES - description: The Java dependencies that came from community sources such as Maven central. 
stepTemplate: computeResources: limits: @@ -233,11 +227,6 @@ spec: dockerfile_copy=$(mktemp --tmpdir "$(basename "$dockerfile_path").XXXXXX") cp "$dockerfile_path" "$dockerfile_copy" - if [ -n "${JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR-}" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_copy"; then - sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.defaulthttp://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_copy" - touch /var/lib/containers/java - fi - # Fixing group permission on /var/lib/containers chown root:root /var/lib/containers @@ -576,29 +565,6 @@ spec: name: varlibcontainers - mountPath: /shared name: shared - - name: analyse-dependencies-java-sbom - image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77 - computeResources: - limits: - memory: 512Mi - cpu: 200m - requests: - memory: 256Mi - cpu: 100m - script: | - if [ -f /var/lib/containers/java ]; then - /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s $(workspaces.source.path)/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path) - sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875 - else - touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path) - fi - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /shared - name: shared - securityContext: - runAsUser: 0 - name: prepare-sboms image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:9f1fd11d9c3c517ecc112d192ad361d16ecf6ce00b83b109c93cf3d1c644a357 From 90f89054a87bd175a0962e2d55e9bd185ef814da Mon Sep 17 00:00:00 2001 From: Tomas Nevrlka Date: Thu, 19 Dec 2024 16:45:01 +0100 Subject: [PATCH 3/5] upgrade buildah remote and ta tasks to 0.3 - Buildah task has been upgraded to 0.3 - Upgrade the oci-ta and remote tasks too --- hack/generate-buildah-remote.sh | 2 +- task/buildah-oci-ta/0.3/MIGRATION.md | 15 + task/buildah-oci-ta/0.3/README.md | 44 + task/buildah-oci-ta/0.3/buildah-oci-ta.yaml | 693 +++++++++++++++ task/buildah-oci-ta/0.3/recipe.yaml | 13 + task/buildah-remote-oci-ta/0.3/MIGRATION.md | 15 + task/buildah-remote-oci-ta/0.3/README.md | 46 + .../0.3/buildah-remote-oci-ta.yaml | 837 ++++++++++++++++++ task/buildah-remote/0.3/MIGRATION.md | 15 + task/buildah-remote/0.3/README.md | 48 + task/buildah-remote/0.3/buildah-remote.yaml | 816 +++++++++++++++++ 11 files changed, 2543 insertions(+), 1 deletion(-) create mode 100644 task/buildah-oci-ta/0.3/MIGRATION.md create mode 100644 task/buildah-oci-ta/0.3/README.md create mode 100644 task/buildah-oci-ta/0.3/buildah-oci-ta.yaml create mode 100644 task/buildah-oci-ta/0.3/recipe.yaml create mode 100644 task/buildah-remote-oci-ta/0.3/MIGRATION.md create mode 100644 task/buildah-remote-oci-ta/0.3/README.md create mode 100644 task/buildah-remote-oci-ta/0.3/buildah-remote-oci-ta.yaml create mode 100644 task/buildah-remote/0.3/MIGRATION.md create mode 100644 task/buildah-remote/0.3/README.md create mode 100644 task/buildah-remote/0.3/buildah-remote.yaml diff --git a/hack/generate-buildah-remote.sh b/hack/generate-buildah-remote.sh index b8544e1808..39535227c3 100755 --- a/hack/generate-buildah-remote.sh +++ b/hack/generate-buildah-remote.sh @@ -6,7 +6,7 @@ SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "${SCRIPTDIR}/../task-generator/remote" go build 
-o /tmp/remote-generator main.go -for version in 0.1 0.2; do +for version in 0.1 0.2 0.3; do /tmp/remote-generator --buildah-task="${SCRIPTDIR}/../task/buildah/${version}/buildah.yaml" \ --remote-task="${SCRIPTDIR}/../task/buildah-remote/${version}/buildah-remote.yaml" --task-version="$version" /tmp/remote-generator --buildah-task="${SCRIPTDIR}/../task/buildah-oci-ta/${version}/buildah-oci-ta.yaml" \ diff --git a/task/buildah-oci-ta/0.3/MIGRATION.md b/task/buildah-oci-ta/0.3/MIGRATION.md new file mode 100644 index 0000000000..3e2f4824cb --- /dev/null +++ b/task/buildah-oci-ta/0.3/MIGRATION.md @@ -0,0 +1,15 @@ +# Migration from 0.2 to 0.3 + +Version 0.3: + +Removes references to `jvm-build-service` +* Removes `analyse-dependencies-java-sbom` step +* Removes `SBOM_JAVA_COMPONENTS_COUNT` and `JAVA_COMMUNITY_DEPENDENCIES` results + +## Action from users + +The removed items are not in use, so their removal will not impact users. + +A Renovate bot PR will be created with a warning icon for this task. + +This is expected, and no action from users is needed. \ No newline at end of file diff --git a/task/buildah-oci-ta/0.3/README.md b/task/buildah-oci-ta/0.3/README.md new file mode 100644 index 0000000000..ea84a85ffc --- /dev/null +++ b/task/buildah-oci-ta/0.3/README.md @@ -0,0 +1,44 @@ +# buildah-oci-ta task + +Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. +In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. +When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. + +## Parameters +|name|description|default value|required| +|---|---|---|---| +|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false| +|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false| +|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false| +|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false| +|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false| +|CACHI2_ARTIFACT|The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.|""|false| +|COMMIT_SHA|The image is built from this commit.|""|false| +|CONTEXT|Path to the directory to use as context.|.|false| +|DOCKERFILE|Path to the Dockerfile to build.|./Dockerfile|false| +|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false| +|HERMETIC|Determines if build will be executed without network access.|false|false| +|IMAGE|Reference of the image buildah will produce.||true| +|IMAGE_EXPIRES_AFTER|Delete image tag after specified time. Empty means to keep the image tag. 
Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.|""|false| +|LABELS|Additional key=value labels that should be applied to the image|[]|false| +|PREFETCH_INPUT|In case it is not empty, the prefetched content should be made available to the build.|""|false| +|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false| +|SOURCE_ARTIFACT|The Trusted Artifact URI pointing to the artifact with the application source code.||true| +|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false| +|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false| +|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false| +|TLSVERIFY|Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)|true|false| +|YUM_REPOS_D_FETCHED|Path in source workspace where dynamically-fetched repos are present|fetched.repos.d|false| +|YUM_REPOS_D_SRC|Path in the git repository in which yum repository files are stored|repos.d|false| +|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false| +|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false| +|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false| + +## Results +|name|description| +|---|---| +|IMAGE_DIGEST|Digest of the image just built| +|IMAGE_REF|Image reference of the built image| +|IMAGE_URL|Image repository and tag where the built image was pushed| +|SBOM_BLOB_URL|Reference of SBOM blob digest to enable digest-based verification from provenance| + diff --git a/task/buildah-oci-ta/0.3/buildah-oci-ta.yaml b/task/buildah-oci-ta/0.3/buildah-oci-ta.yaml new file mode 100644 index 0000000000..e204c760b8 --- /dev/null +++ b/task/buildah-oci-ta/0.3/buildah-oci-ta.yaml @@ -0,0 +1,693 @@ +--- +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: buildah-oci-ta + annotations: + tekton.dev/pipelines.minVersion: 0.12.1 + tekton.dev/tags: image-build, konflux + labels: + app.kubernetes.io/version: 0.2.1 + build.appstudio.redhat.com/build_type: docker +spec: + description: |- + Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. + In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. + When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. 
+ params: + - name: ACTIVATION_KEY + description: Name of secret which contains subscription activation key + type: string + default: activation-key + - name: ADDITIONAL_SECRET + description: Name of a secret which will be made available to the build + with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET + type: string + default: does-not-exist + - name: ADD_CAPABILITIES + description: Comma separated list of extra capabilities to add when + running 'buildah build' + type: string + default: "" + - name: BUILD_ARGS + description: Array of --build-arg values ("arg=value" strings) + type: array + default: [] + - name: BUILD_ARGS_FILE + description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file + type: string + default: "" + - name: CACHI2_ARTIFACT + description: The Trusted Artifact URI pointing to the artifact with + the prefetched dependencies. + type: string + default: "" + - name: COMMIT_SHA + description: The image is built from this commit. + type: string + default: "" + - name: CONTEXT + description: Path to the directory to use as context. + type: string + default: . + - name: DOCKERFILE + description: Path to the Dockerfile to build. + type: string + default: ./Dockerfile + - name: ENTITLEMENT_SECRET + description: Name of secret which contains the entitlement certificates + type: string + default: etc-pki-entitlement + - name: HERMETIC + description: Determines if build will be executed without network access. + type: string + default: "false" + - name: IMAGE + description: Reference of the image buildah will produce. + type: string + - name: IMAGE_EXPIRES_AFTER + description: Delete image tag after specified time. Empty means to keep + the image tag. Time values could be something like 1h, 2d, 3w for + hours, days, and weeks, respectively. + type: string + default: "" + - name: LABELS + description: Additional key=value labels that should be applied to the + image + type: array + default: [] + - name: PREFETCH_INPUT + description: In case it is not empty, the prefetched content should + be made available to the build. + type: string + default: "" + - name: SKIP_UNUSED_STAGES + description: Whether to skip stages in Containerfile that seem unused + by subsequent stages + type: string + default: "true" + - name: SOURCE_ARTIFACT + description: The Trusted Artifact URI pointing to the artifact with + the application source code. + type: string + - name: SQUASH + description: Squash all new and previous layers added as a part of this + build, as per --squash + type: string + default: "false" + - name: STORAGE_DRIVER + description: Storage driver to configure for buildah + type: string + default: vfs + - name: TARGET_STAGE + description: Target stage in Dockerfile to build. If not specified, + the Dockerfile is processed entirely to (and including) its last stage. 
+ type: string + default: "" + - name: TLSVERIFY + description: Verify the TLS on the registry endpoint (for push/pull + to a non-TLS registry) + type: string + default: "true" + - name: YUM_REPOS_D_FETCHED + description: Path in source workspace where dynamically-fetched repos + are present + default: fetched.repos.d + - name: YUM_REPOS_D_SRC + description: Path in the git repository in which yum repository files + are stored + default: repos.d + - name: YUM_REPOS_D_TARGET + description: Target path on the container in which yum repository files + should be made available + default: /etc/yum.repos.d + - name: caTrustConfigMapKey + description: The name of the key in the ConfigMap that contains the + CA bundle data. + type: string + default: ca-bundle.crt + - name: caTrustConfigMapName + description: The name of the ConfigMap to read CA bundle data from. + type: string + default: trusted-ca + results: + - name: IMAGE_DIGEST + description: Digest of the image just built + - name: IMAGE_REF + description: Image reference of the built image + - name: IMAGE_URL + description: Image repository and tag where the built image was pushed + - name: SBOM_BLOB_URL + description: Reference of SBOM blob digest to enable digest-based verification + from provenance + type: string + volumes: + - name: activation-key + secret: + optional: true + secretName: $(params.ACTIVATION_KEY) + - name: additional-secret + secret: + optional: true + secretName: $(params.ADDITIONAL_SECRET) + - name: etc-pki-entitlement + secret: + optional: true + secretName: $(params.ENTITLEMENT_SECRET) + - name: shared + emptyDir: {} + - name: trusted-ca + configMap: + items: + - key: $(params.caTrustConfigMapKey) + path: ca-bundle.crt + name: $(params.caTrustConfigMapName) + optional: true + - name: varlibcontainers + emptyDir: {} + - name: workdir + emptyDir: {} + stepTemplate: + computeResources: + limits: + cpu: "4" + memory: 4Gi + requests: + cpu: "1" + memory: 1Gi + env: + - name: ACTIVATION_KEY + value: $(params.ACTIVATION_KEY) + - name: ADDITIONAL_SECRET + value: $(params.ADDITIONAL_SECRET) + - name: ADD_CAPABILITIES + value: $(params.ADD_CAPABILITIES) + - name: BUILDAH_FORMAT + value: oci + - name: BUILD_ARGS_FILE + value: $(params.BUILD_ARGS_FILE) + - name: CONTEXT + value: $(params.CONTEXT) + - name: ENTITLEMENT_SECRET + value: $(params.ENTITLEMENT_SECRET) + - name: HERMETIC + value: $(params.HERMETIC) + - name: IMAGE + value: $(params.IMAGE) + - name: IMAGE_EXPIRES_AFTER + value: $(params.IMAGE_EXPIRES_AFTER) + - name: SKIP_UNUSED_STAGES + value: $(params.SKIP_UNUSED_STAGES) + - name: SOURCE_CODE_DIR + value: source + - name: SQUASH + value: $(params.SQUASH) + - name: STORAGE_DRIVER + value: $(params.STORAGE_DRIVER) + - name: TARGET_STAGE + value: $(params.TARGET_STAGE) + - name: TLSVERIFY + value: $(params.TLSVERIFY) + - name: YUM_REPOS_D_FETCHED + value: $(params.YUM_REPOS_D_FETCHED) + - name: YUM_REPOS_D_SRC + value: $(params.YUM_REPOS_D_SRC) + - name: YUM_REPOS_D_TARGET + value: $(params.YUM_REPOS_D_TARGET) + volumeMounts: + - mountPath: /shared + name: shared + - mountPath: /var/workdir + name: workdir + steps: + - name: use-trusted-artifact + image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:ff35e09ff5c89e54538b50abae241a765b2b7868f05d62c4835bebf0978f3659 + args: + - use + - $(params.SOURCE_ARTIFACT)=/var/workdir/source + - $(params.CACHI2_ARTIFACT)=/var/workdir/cachi2 + - name: build + image: 
quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + args: + - --build-args + - $(params.BUILD_ARGS[*]) + - --labels + - $(params.LABELS[*]) + workingDir: /var/workdir + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /entitlement + name: etc-pki-entitlement + - mountPath: /activation-key + name: activation-key + - mountPath: /additional-secret + name: additional-secret + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + env: + - name: COMMIT_SHA + value: $(params.COMMIT_SHA) + - name: DOCKERFILE + value: $(params.DOCKERFILE) + script: | + #!/bin/bash + set -euo pipefail + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" + elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE" + elif [ -e "$DOCKERFILE" ]; then + # Instrumented builds (SAST) use this custom dockerffile step as their base + dockerfile_path="$DOCKERFILE" + elif echo "$DOCKERFILE" | grep -q "^https\?://"; then + echo "Fetch Dockerfile from $DOCKERFILE" + dockerfile_path=$(mktemp --suffix=-Dockerfile) + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE") + if [ $http_code != 200 ]; then + echo "No Dockerfile is fetched. Server responds $http_code" + exit 1 + fi + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore") + if [ $http_code = 200 ]; then + echo "Fetched .dockerignore from $DOCKERFILE.dockerignore" + mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore + fi + else + echo "Cannot find Dockerfile $DOCKERFILE" + exit 1 + fi + + dockerfile_copy=$(mktemp --tmpdir "$(basename "$dockerfile_path").XXXXXX") + cp "$dockerfile_path" "$dockerfile_copy" + + # Fixing group permission on /var/lib/containers + chown root:root /var/lib/containers + + sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf + + # Setting new namespace to run buildah - 2^32-2 + echo 'root:1:4294967294' | tee -a /etc/subuid >>/etc/subgid + + build_args=() + if [ -n "${BUILD_ARGS_FILE}" ]; then + # Parse BUILD_ARGS_FILE ourselves because dockerfile-json doesn't support it + echo "Parsing ARGs from $BUILD_ARGS_FILE" + mapfile -t build_args < <( + # https://www.mankier.com/1/buildah-build#--build-arg-file + # delete lines that start with # + # delete blank lines + sed -e '/^#/d' -e '/^\s*$/d' "${SOURCE_CODE_DIR}/${BUILD_ARGS_FILE}" + ) + fi + + LABELS=() + # Split `args` into two sets of arguments. + while [[ $# -gt 0 ]]; do + case $1 in + --build-args) + shift + # Note: this may result in multiple --build-arg=KEY=value flags with the same KEY being + # passed to buildah. In that case, the *last* occurrence takes precedence. 
This is why + # we append BUILD_ARGS after the content of the BUILD_ARGS_FILE + while [[ $# -gt 0 && $1 != --* ]]; do + build_args+=("$1") + shift + done + ;; + --labels) + shift + while [[ $# -gt 0 && $1 != --* ]]; do + LABELS+=("--label" "$1") + shift + done + ;; + *) + echo "unexpected argument: $1" >&2 + exit 2 + ;; + esac + done + + BUILD_ARG_FLAGS=() + for build_arg in "${build_args[@]}"; do + BUILD_ARG_FLAGS+=("--build-arg=$build_arg") + done + + dockerfile-json "${BUILD_ARG_FLAGS[@]}" "$dockerfile_copy" >/shared/parsed_dockerfile.json + BASE_IMAGES=$( + jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName | select(test("^oci-archive:") | not)' /shared/parsed_dockerfile.json + ) + + BUILDAH_ARGS=() + UNSHARE_ARGS=() + + if [ "${HERMETIC}" == "true" ]; then + BUILDAH_ARGS+=("--pull=never") + UNSHARE_ARGS+=("--net") + + for image in $BASE_IMAGES; do + unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image + done + echo "Build will be executed with network isolation" + fi + + if [ -n "${TARGET_STAGE}" ]; then + BUILDAH_ARGS+=("--target=${TARGET_STAGE}") + fi + + BUILDAH_ARGS+=("${BUILD_ARG_FLAGS[@]}") + + if [ -n "${ADD_CAPABILITIES}" ]; then + BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}") + fi + + if [ "${SQUASH}" == "true" ]; then + BUILDAH_ARGS+=("--squash") + fi + + if [ "${SKIP_UNUSED_STAGES}" != "true" ]; then + BUILDAH_ARGS+=("--skip-unused-stages=false") + fi + + VOLUME_MOUNTS=() + + if [ -f "/var/workdir/cachi2/cachi2.env" ]; then + cp -r "/var/workdir/cachi2" /tmp/ + chmod -R go+rwX /tmp/cachi2 + VOLUME_MOUNTS+=(--volume /tmp/cachi2:/cachi2) + # Read in the whole file (https://unix.stackexchange.com/questions/533277), then + # for each RUN ... line insert the cachi2.env command *after* any options like --mount + sed -E -i \ + -e 'H;1h;$!d;x' \ + -e 's@^\s*(run((\s|\\\n)+-\S+)*(\s|\\\n)+)@\1. /cachi2/cachi2.env \&\& \\\n @igM' \ + "$dockerfile_copy" + echo "Prefetched content will be made available" + + prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo" + if [ -f "$prefetched_repo_for_my_arch" ]; then + echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED" + mkdir -p "$YUM_REPOS_D_FETCHED" + cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED" + fi + fi + + # if yum repofiles stored in git, copy them to mount point outside the source dir + if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then + mkdir -p ${YUM_REPOS_D_FETCHED} + cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED} + fi + + # if anything in the repofiles mount point (either fetched or from git), mount it + if [ -d "${YUM_REPOS_D_FETCHED}" ]; then + chmod -R go+rwX ${YUM_REPOS_D_FETCHED} + mount_point=$(realpath ${YUM_REPOS_D_FETCHED}) + VOLUME_MOUNTS+=(--volume "${mount_point}:${YUM_REPOS_D_TARGET}") + fi + + DEFAULT_LABELS=( + "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')" + "--label" "architecture=$(uname -m)" + "--label" "vcs-type=git" + ) + [ -n "$COMMIT_SHA" ] && DEFAULT_LABELS+=("--label" "vcs-ref=$COMMIT_SHA") + [ -n "$IMAGE_EXPIRES_AFTER" ] && DEFAULT_LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER") + + # Concatenate defaults and explicit labels. If a label appears twice, the last one wins. + LABELS=("${DEFAULT_LABELS[@]}" "${LABELS[@]}") + + ACTIVATION_KEY_PATH="/activation-key" + ENTITLEMENT_PATH="/entitlement" + + # 0. if hermetic=true, skip all subscription related stuff + # 1. 
do not enable activation key and entitlement at same time. If both vars are provided, prefer activation key. + # 2. Activation-keys will be used when the key 'org' exists in the activation key secret. + # 3. try to pre-register and mount files to the correct location so that users do no need to modify Dockerfiles. + # 3. If the Dockerfile contains the string "subcription-manager register", add the activation-keys volume + # to buildah but don't pre-register for backwards compatibility. Mount an empty directory on + # shared emptydir volume to "/etc/pki/entitlement" to prevent certificates from being included + + if [ "${HERMETIC}" != "true" ] && [ -e /activation-key/org ]; then + cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key + mkdir -p /shared/rhsm/etc/pki/entitlement + mkdir -p /shared/rhsm/etc/pki/consumer + + VOLUME_MOUNTS+=(-v /tmp/activation-key:/activation-key + -v /shared/rhsm/etc/pki/entitlement:/etc/pki/entitlement:Z + -v /shared/rhsm/etc/pki/consumer:/etc/pki/consumer:Z) + echo "Adding activation key to the build" + + if ! grep -E "^[^#]*subscription-manager.[^#]*register" "$dockerfile_path"; then + # user is not running registration in the Containerfile: pre-register. + echo "Pre-registering with subscription manager." + subscription-manager register --org "$(cat /tmp/activation-key/org)" --activationkey "$(cat /tmp/activation-key/activationkey)" + trap 'subscription-manager unregister || true' EXIT + + # copy generated certificates to /shared volume + cp /etc/pki/entitlement/*.pem /shared/rhsm/etc/pki/entitlement + cp /etc/pki/consumer/*.pem /shared/rhsm/etc/pki/consumer + + # and then mount get /etc/rhsm/ca/redhat-uep.pem into /run/secrets/rhsm/ca + VOLUME_MOUNTS+=(--volume /etc/rhsm/ca/redhat-uep.pem:/etc/rhsm/ca/redhat-uep.pem:Z) + fi + + elif [ "${HERMETIC}" != "true" ] && find /entitlement -name "*.pem" >>null; then + cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement + VOLUME_MOUNTS+=(--volume /tmp/entitlement:/etc/pki/entitlement) + echo "Adding the entitlement to the build" + fi + + if [ -n "${ADDITIONAL_VOLUME_MOUNTS-}" ]; then + # ADDITIONAL_VOLUME_MOUNTS allows to specify more volumes for the build. + # Instrumented builds (SAST) use this step as their base and add some other tools. + while read -r volume_mount; do + VOLUME_MOUNTS+=("--volume=$volume_mount") + done <<<"$ADDITIONAL_VOLUME_MOUNTS" + fi + + ADDITIONAL_SECRET_PATH="/additional-secret" + ADDITIONAL_SECRET_TMP="/tmp/additional-secret" + if [ -d "$ADDITIONAL_SECRET_PATH" ]; then + cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP + while read -r filename; do + echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}" + BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}") + done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;) + fi + + # Prevent ShellCheck from giving a warning because 'image' is defined and 'IMAGE' is not. + declare IMAGE + + buildah_cmd_array=( + buildah build + "${VOLUME_MOUNTS[@]}" + "${BUILDAH_ARGS[@]}" + "${LABELS[@]}" + --tls-verify="$TLSVERIFY" --no-cache + --ulimit nofile=4096:4096 + -f "$dockerfile_copy" -t "$IMAGE" . + ) + buildah_cmd=$(printf "%q " "${buildah_cmd_array[@]}") + + if [ "${HERMETIC}" == "true" ]; then + # enabling loopback adapter enables Bazel builds to work in hermetic mode. 
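+        # (when hermetic, UNSHARE_ARGS carries --net, so the unshare call below runs this
+        # command in a fresh network namespace where only the loopback interface exists
+        # and starts out down)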
+ command="ip link set lo up && $buildah_cmd" + else + command="$buildah_cmd" + fi + + # disable host subcription manager integration + find /usr/share/rhel/secrets -type l -exec unlink {} \; + + unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- sh -c "$command" + + container=$(buildah from --pull-never "$IMAGE") + + # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later + if [ -f "/tmp/cachi2/output/bom.json" ]; then + echo "Making copy of sbom-cachi2.json" + cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json + fi + + buildah mount $container | tee /shared/container_path + # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners + find $(cat /shared/container_path) -xtype l -delete + echo $container >/shared/container_name + + touch /shared/base_images_digests + for image in $BASE_IMAGES; do + base_image_digest=$(buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image") + # In some cases, there might be BASE_IMAGES, but not any associated digest. This happens + # if buildah did not use that particular image during build because it was skipped + if [ -n "$base_image_digest" ]; then + echo "$image $base_image_digest" >>/shared/base_images_digests + fi + done + computeResources: + limits: + cpu: "4" + memory: 8Gi + requests: + cpu: "1" + memory: 2Gi + securityContext: + capabilities: + add: + - SETFCAP + - name: icm + image: quay.io/konflux-ci/icm-injection-scripts:latest@sha256:462980e94ba689b5f56c3d5dfb3358cd8c685300daf65a71532f11898935e7f1 + args: + - $(params.IMAGE) + workingDir: /var/workdir + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + securityContext: + capabilities: + add: + - SETFCAP + - name: push + image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + workingDir: /var/workdir + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + script: | + #!/bin/bash + set -e + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + retries=5 + # Push to a unique tag based on the TaskRun name to avoid race conditions + echo "Pushing to ${IMAGE%:*}:${TASKRUN_NAME}" + if ! buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + "$IMAGE" \ + "docker://${IMAGE%:*}:$(context.taskRun.name)"; then + echo "Failed to push sbom image to ${IMAGE%:*}:$(context.taskRun.name) after ${retries} tries" + exit 1 + fi + + # Push to a tag based on the git revision + echo "Pushing to ${IMAGE}" + if ! 
buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + --digestfile "/var/workdir/image-digest" "$IMAGE" \ + "docker://$IMAGE"; then + echo "Failed to push sbom image to $IMAGE after ${retries} tries" + exit 1 + fi + + cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path) + echo -n "$IMAGE" | tee $(results.IMAGE_URL.path) + { + echo -n "${IMAGE}@" + cat "/var/workdir/image-digest" + } >"$(results.IMAGE_REF.path)" + securityContext: + capabilities: + add: + - SETFCAP + runAsUser: 0 + - name: sbom-syft-generate + image: registry.access.redhat.com/rh-syft-tech-preview/syft-rhel9:1.4.1@sha256:34d7065427085a31dc4949bd283c001b91794d427e1e4cdf1b21ea4faf9fee3f + workingDir: /var/workdir/source + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /shared + name: shared + script: | + echo "Running syft on the source directory" + syft dir:"/var/workdir/$SOURCE_CODE_DIR/$CONTEXT" --output cyclonedx-json="/var/workdir/sbom-source.json" + echo "Running syft on the image filesystem" + syft dir:"$(cat /shared/container_path)" --output cyclonedx-json="/var/workdir/sbom-image.json" + - name: prepare-sboms + image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:9f1fd11d9c3c517ecc112d192ad361d16ecf6ce00b83b109c93cf3d1c644a357 + workingDir: /var/workdir + script: | + echo "Merging contents of sbom-source.json and sbom-image.json into sbom-cyclonedx.json" + python3 /scripts/merge_syft_sboms.py + + if [ -f "sbom-cachi2.json" ]; then + echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json" + python3 /scripts/merge_cachi2_sboms.py sbom-cachi2.json sbom-cyclonedx.json >sbom-temp.json + mv sbom-temp.json sbom-cyclonedx.json + fi + + echo "Adding base images data to sbom-cyclonedx.json" + python3 /scripts/base_images_sbom_script.py \ + --sbom=sbom-cyclonedx.json \ + --parsed-dockerfile=/shared/parsed_dockerfile.json \ + --base-images-digests=/shared/base_images_digests + + echo "Adding image reference to sbom" + IMAGE_URL="$(cat "$(results.IMAGE_URL.path)")" + IMAGE_DIGEST="$(cat "$(results.IMAGE_DIGEST.path)")" + + python3 /scripts/add_image_reference.py \ + --image-url "$IMAGE_URL" \ + --image-digest "$IMAGE_DIGEST" \ + --input-file sbom-cyclonedx.json \ + --output-file /tmp/sbom-cyclonedx.tmp.json + + mv /tmp/sbom-cyclonedx.tmp.json sbom-cyclonedx.json + computeResources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + securityContext: + runAsUser: 0 + - name: upload-sbom + image: quay.io/konflux-ci/appstudio-utils:48c311af02858e2422d6229600e9959e496ddef1@sha256:91ddd999271f65d8ec8487b10f3dd378f81aa894e11b9af4d10639fd52bba7e8 + workingDir: /var/workdir + volumeMounts: + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + script: | + #!/bin/bash + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")" + + # Remove tag from IMAGE while allowing registry to contain a port number. + sbom_repo="${IMAGE%:*}" + sbom_digest="$(sha256sum sbom-cyclonedx.json | cut -d' ' -f1)" + # The SBOM_BLOB_URL is created by `cosign attach sbom`. 
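+      # Its blob digest is the sha256 of the uploaded file, so hashing the local
+      # sbom-cyclonedx.json reproduces the same digest without querying the registry.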
+ echo -n "${sbom_repo}@sha256:${sbom_digest}" | tee "$(results.SBOM_BLOB_URL.path)" + computeResources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi diff --git a/task/buildah-oci-ta/0.3/recipe.yaml b/task/buildah-oci-ta/0.3/recipe.yaml new file mode 100644 index 0000000000..c0162beb20 --- /dev/null +++ b/task/buildah-oci-ta/0.3/recipe.yaml @@ -0,0 +1,13 @@ +--- +base: ../../buildah/0.3/buildah.yaml +removeParams: + - BUILDER_IMAGE +add: + - use-source + - use-cachi2 +removeWorkspaces: + - source +replacements: + workspaces.source.path: /var/workdir +regexReplacements: + "/workspace(/.*)": /var/workdir$1 diff --git a/task/buildah-remote-oci-ta/0.3/MIGRATION.md b/task/buildah-remote-oci-ta/0.3/MIGRATION.md new file mode 100644 index 0000000000..3e2f4824cb --- /dev/null +++ b/task/buildah-remote-oci-ta/0.3/MIGRATION.md @@ -0,0 +1,15 @@ +# Migration from 0.2 to 0.3 + +Version 0.3: + +Removes references to `jvm-build-service` +* Removes `analyse-dependencies-java-sbom` step +* Removes `SBOM_JAVA_COMPONENTS_COUNT` and `JAVA_COMMUNITY_DEPENDENCIES` results + +## Action from users + +The removed items are not in use, so their removal will not impact users. + +A Renovate bot PR will be created with a warning icon for this task. + +This is expected, and no action from users is needed. \ No newline at end of file diff --git a/task/buildah-remote-oci-ta/0.3/README.md b/task/buildah-remote-oci-ta/0.3/README.md new file mode 100644 index 0000000000..33d81584da --- /dev/null +++ b/task/buildah-remote-oci-ta/0.3/README.md @@ -0,0 +1,46 @@ +# buildah-remote-oci-ta task + +Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. +In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. +When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. + +## Parameters +|name|description|default value|required| +|---|---|---|---| +|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false| +|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false| +|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false| +|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false| +|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false| +|CACHI2_ARTIFACT|The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.|""|false| +|COMMIT_SHA|The image is built from this commit.|""|false| +|CONTEXT|Path to the directory to use as context.|.|false| +|DOCKERFILE|Path to the Dockerfile to build.|./Dockerfile|false| +|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false| +|HERMETIC|Determines if build will be executed without network access.|false|false| +|IMAGE|Reference of the image buildah will produce.||true| +|IMAGE_EXPIRES_AFTER|Delete image tag after specified time. Empty means to keep the image tag. 
Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.|""|false| +|LABELS|Additional key=value labels that should be applied to the image|[]|false| +|PREFETCH_INPUT|In case it is not empty, the prefetched content should be made available to the build.|""|false| +|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false| +|SOURCE_ARTIFACT|The Trusted Artifact URI pointing to the artifact with the application source code.||true| +|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false| +|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false| +|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false| +|TLSVERIFY|Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)|true|false| +|YUM_REPOS_D_FETCHED|Path in source workspace where dynamically-fetched repos are present|fetched.repos.d|false| +|YUM_REPOS_D_SRC|Path in the git repository in which yum repository files are stored|repos.d|false| +|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false| +|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false| +|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false| +|PLATFORM|The platform to build on||true| +|IMAGE_APPEND_PLATFORM|Whether to append a sanitized platform architecture on the IMAGE tag|false|false| + +## Results +|name|description| +|---|---| +|IMAGE_DIGEST|Digest of the image just built| +|IMAGE_REF|Image reference of the built image| +|IMAGE_URL|Image repository and tag where the built image was pushed| +|SBOM_BLOB_URL|Reference of SBOM blob digest to enable digest-based verification from provenance| + diff --git a/task/buildah-remote-oci-ta/0.3/buildah-remote-oci-ta.yaml b/task/buildah-remote-oci-ta/0.3/buildah-remote-oci-ta.yaml new file mode 100644 index 0000000000..1f43a92c7c --- /dev/null +++ b/task/buildah-remote-oci-ta/0.3/buildah-remote-oci-ta.yaml @@ -0,0 +1,837 @@ +apiVersion: tekton.dev/v1 +kind: Task +metadata: + annotations: + tekton.dev/pipelines.minVersion: 0.12.1 + tekton.dev/tags: image-build, konflux + creationTimestamp: null + labels: + app.kubernetes.io/version: 0.2.1 + build.appstudio.redhat.com/build_type: docker + name: buildah-remote-oci-ta +spec: + description: |- + Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. + In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. + When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. 
+ params: + - default: activation-key + description: Name of secret which contains subscription activation key + name: ACTIVATION_KEY + type: string + - default: does-not-exist + description: Name of a secret which will be made available to the build with 'buildah + build --secret' at /run/secrets/$ADDITIONAL_SECRET + name: ADDITIONAL_SECRET + type: string + - default: "" + description: Comma separated list of extra capabilities to add when running 'buildah + build' + name: ADD_CAPABILITIES + type: string + - default: [] + description: Array of --build-arg values ("arg=value" strings) + name: BUILD_ARGS + type: array + - default: "" + description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: BUILD_ARGS_FILE + type: string + - default: "" + description: The Trusted Artifact URI pointing to the artifact with the prefetched + dependencies. + name: CACHI2_ARTIFACT + type: string + - default: "" + description: The image is built from this commit. + name: COMMIT_SHA + type: string + - default: . + description: Path to the directory to use as context. + name: CONTEXT + type: string + - default: ./Dockerfile + description: Path to the Dockerfile to build. + name: DOCKERFILE + type: string + - default: etc-pki-entitlement + description: Name of secret which contains the entitlement certificates + name: ENTITLEMENT_SECRET + type: string + - default: "false" + description: Determines if build will be executed without network access. + name: HERMETIC + type: string + - description: Reference of the image buildah will produce. + name: IMAGE + type: string + - default: "" + description: Delete image tag after specified time. Empty means to keep the image + tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, + respectively. + name: IMAGE_EXPIRES_AFTER + type: string + - default: [] + description: Additional key=value labels that should be applied to the image + name: LABELS + type: array + - default: "" + description: In case it is not empty, the prefetched content should be made available + to the build. + name: PREFETCH_INPUT + type: string + - default: "true" + description: Whether to skip stages in Containerfile that seem unused by subsequent + stages + name: SKIP_UNUSED_STAGES + type: string + - description: The Trusted Artifact URI pointing to the artifact with the application + source code. + name: SOURCE_ARTIFACT + type: string + - default: "false" + description: Squash all new and previous layers added as a part of this build, + as per --squash + name: SQUASH + type: string + - default: vfs + description: Storage driver to configure for buildah + name: STORAGE_DRIVER + type: string + - default: "" + description: Target stage in Dockerfile to build. If not specified, the Dockerfile + is processed entirely to (and including) its last stage. 
+ name: TARGET_STAGE + type: string + - default: "true" + description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS + registry) + name: TLSVERIFY + type: string + - default: fetched.repos.d + description: Path in source workspace where dynamically-fetched repos are present + name: YUM_REPOS_D_FETCHED + - default: repos.d + description: Path in the git repository in which yum repository files are stored + name: YUM_REPOS_D_SRC + - default: /etc/yum.repos.d + description: Target path on the container in which yum repository files should + be made available + name: YUM_REPOS_D_TARGET + - default: ca-bundle.crt + description: The name of the key in the ConfigMap that contains the CA bundle + data. + name: caTrustConfigMapKey + type: string + - default: trusted-ca + description: The name of the ConfigMap to read CA bundle data from. + name: caTrustConfigMapName + type: string + - description: The platform to build on + name: PLATFORM + type: string + - default: "false" + description: Whether to append a sanitized platform architecture on the IMAGE + tag + name: IMAGE_APPEND_PLATFORM + type: string + results: + - description: Digest of the image just built + name: IMAGE_DIGEST + - description: Image reference of the built image + name: IMAGE_REF + - description: Image repository and tag where the built image was pushed + name: IMAGE_URL + - description: Reference of SBOM blob digest to enable digest-based verification + from provenance + name: SBOM_BLOB_URL + type: string + stepTemplate: + computeResources: + limits: + cpu: "4" + memory: 4Gi + requests: + cpu: "1" + memory: 1Gi + env: + - name: ACTIVATION_KEY + value: $(params.ACTIVATION_KEY) + - name: ADDITIONAL_SECRET + value: $(params.ADDITIONAL_SECRET) + - name: ADD_CAPABILITIES + value: $(params.ADD_CAPABILITIES) + - name: BUILDAH_FORMAT + value: oci + - name: BUILD_ARGS_FILE + value: $(params.BUILD_ARGS_FILE) + - name: CONTEXT + value: $(params.CONTEXT) + - name: ENTITLEMENT_SECRET + value: $(params.ENTITLEMENT_SECRET) + - name: HERMETIC + value: $(params.HERMETIC) + - name: IMAGE + value: $(params.IMAGE) + - name: IMAGE_EXPIRES_AFTER + value: $(params.IMAGE_EXPIRES_AFTER) + - name: SKIP_UNUSED_STAGES + value: $(params.SKIP_UNUSED_STAGES) + - name: SOURCE_CODE_DIR + value: source + - name: SQUASH + value: $(params.SQUASH) + - name: STORAGE_DRIVER + value: $(params.STORAGE_DRIVER) + - name: TARGET_STAGE + value: $(params.TARGET_STAGE) + - name: TLSVERIFY + value: $(params.TLSVERIFY) + - name: YUM_REPOS_D_FETCHED + value: $(params.YUM_REPOS_D_FETCHED) + - name: YUM_REPOS_D_SRC + value: $(params.YUM_REPOS_D_SRC) + - name: YUM_REPOS_D_TARGET + value: $(params.YUM_REPOS_D_TARGET) + - name: BUILDER_IMAGE + value: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + - name: PLATFORM + value: $(params.PLATFORM) + - name: IMAGE_APPEND_PLATFORM + value: $(params.IMAGE_APPEND_PLATFORM) + volumeMounts: + - mountPath: /shared + name: shared + - mountPath: /var/workdir + name: workdir + steps: + - args: + - use + - $(params.SOURCE_ARTIFACT)=/var/workdir/source + - $(params.CACHI2_ARTIFACT)=/var/workdir/cachi2 + computeResources: {} + image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:ff35e09ff5c89e54538b50abae241a765b2b7868f05d62c4835bebf0978f3659 + name: use-trusted-artifact + - args: + - --build-args + - $(params.BUILD_ARGS[*]) + - --labels + - $(params.LABELS[*]) + computeResources: + limits: + cpu: "4" + memory: 8Gi + requests: + cpu: "1" + memory: 
2Gi + env: + - name: COMMIT_SHA + value: $(params.COMMIT_SHA) + - name: DOCKERFILE + value: $(params.DOCKERFILE) + image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + name: build + script: |- + #!/bin/bash + set -e + set -o verbose + mkdir -p ~/.ssh + if [ -e "/ssh/error" ]; then + #no server could be provisioned + cat /ssh/error + exit 1 + fi + export SSH_HOST=$(cat /ssh/host) + + if [ "$SSH_HOST" == "localhost" ] ; then + IS_LOCALHOST=true + echo "Localhost detected; running build in cluster" + elif [ -e "/ssh/otp" ]; then + curl --cacert /ssh/otp-ca -XPOST -d @/ssh/otp $(cat /ssh/otp-server) >~/.ssh/id_rsa + echo "" >> ~/.ssh/id_rsa + else + cp /ssh/id_rsa ~/.ssh + fi + + mkdir -p scripts + + if ! [[ $IS_LOCALHOST ]]; then + chmod 0400 ~/.ssh/id_rsa + export BUILD_DIR=$(cat /ssh/user-dir) + export SSH_ARGS="-o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=10" + echo "$BUILD_DIR" + ssh $SSH_ARGS "$SSH_HOST" mkdir -p "$BUILD_DIR/workspaces" "$BUILD_DIR/scripts" "$BUILD_DIR/volumes" + + PORT_FORWARD="" + PODMAN_PORT_FORWARD="" + if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] ; then + PORT_FORWARD=" -L 80:$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR:80" + PODMAN_PORT_FORWARD=" -e JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR=localhost" + fi + + rsync -ra /shared/ "$SSH_HOST:$BUILD_DIR/volumes/shared/" + rsync -ra /var/workdir/ "$SSH_HOST:$BUILD_DIR/volumes/workdir/" + rsync -ra /entitlement/ "$SSH_HOST:$BUILD_DIR/volumes/etc-pki-entitlement/" + rsync -ra /activation-key/ "$SSH_HOST:$BUILD_DIR/volumes/activation-key/" + rsync -ra /additional-secret/ "$SSH_HOST:$BUILD_DIR/volumes/additional-secret/" + rsync -ra /mnt/trusted-ca/ "$SSH_HOST:$BUILD_DIR/volumes/trusted-ca/" + rsync -ra "$HOME/.docker/" "$SSH_HOST:$BUILD_DIR/.docker/" + rsync -ra "/tekton/results/" "$SSH_HOST:$BUILD_DIR/results/" + fi + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + + cat >scripts/script-build.sh <<'REMOTESSHEOF' + #!/bin/bash + set -euo pipefail + cd /var/workdir + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" + elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE" + elif [ -e "$DOCKERFILE" ]; then + # Instrumented builds (SAST) use this custom dockerffile step as their base + dockerfile_path="$DOCKERFILE" + elif echo "$DOCKERFILE" | grep -q "^https\?://"; then + echo "Fetch Dockerfile from $DOCKERFILE" + dockerfile_path=$(mktemp --suffix=-Dockerfile) + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE") + if [ $http_code != 200 ]; then + echo "No Dockerfile is fetched. 
Server responds $http_code" + exit 1 + fi + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore") + if [ $http_code = 200 ]; then + echo "Fetched .dockerignore from $DOCKERFILE.dockerignore" + mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore + fi + else + echo "Cannot find Dockerfile $DOCKERFILE" + exit 1 + fi + + dockerfile_copy=$(mktemp --tmpdir "$(basename "$dockerfile_path").XXXXXX") + cp "$dockerfile_path" "$dockerfile_copy" + + # Fixing group permission on /var/lib/containers + chown root:root /var/lib/containers + + sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf + + # Setting new namespace to run buildah - 2^32-2 + echo 'root:1:4294967294' | tee -a /etc/subuid >>/etc/subgid + + build_args=() + if [ -n "${BUILD_ARGS_FILE}" ]; then + # Parse BUILD_ARGS_FILE ourselves because dockerfile-json doesn't support it + echo "Parsing ARGs from $BUILD_ARGS_FILE" + mapfile -t build_args < <( + # https://www.mankier.com/1/buildah-build#--build-arg-file + # delete lines that start with # + # delete blank lines + sed -e '/^#/d' -e '/^\s*$/d' "${SOURCE_CODE_DIR}/${BUILD_ARGS_FILE}" + ) + fi + + LABELS=() + # Split `args` into two sets of arguments. + while [[ $# -gt 0 ]]; do + case $1 in + --build-args) + shift + # Note: this may result in multiple --build-arg=KEY=value flags with the same KEY being + # passed to buildah. In that case, the *last* occurrence takes precedence. This is why + # we append BUILD_ARGS after the content of the BUILD_ARGS_FILE + while [[ $# -gt 0 && $1 != --* ]]; do + build_args+=("$1") + shift + done + ;; + --labels) + shift + while [[ $# -gt 0 && $1 != --* ]]; do + LABELS+=("--label" "$1") + shift + done + ;; + *) + echo "unexpected argument: $1" >&2 + exit 2 + ;; + esac + done + + BUILD_ARG_FLAGS=() + for build_arg in "${build_args[@]}"; do + BUILD_ARG_FLAGS+=("--build-arg=$build_arg") + done + + dockerfile-json "${BUILD_ARG_FLAGS[@]}" "$dockerfile_copy" >/shared/parsed_dockerfile.json + BASE_IMAGES=$( + jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName | select(test("^oci-archive:") | not)' /shared/parsed_dockerfile.json + ) + + BUILDAH_ARGS=() + UNSHARE_ARGS=() + + if [ "${HERMETIC}" == "true" ]; then + BUILDAH_ARGS+=("--pull=never") + UNSHARE_ARGS+=("--net") + + for image in $BASE_IMAGES; do + unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image + done + echo "Build will be executed with network isolation" + fi + + if [ -n "${TARGET_STAGE}" ]; then + BUILDAH_ARGS+=("--target=${TARGET_STAGE}") + fi + + BUILDAH_ARGS+=("${BUILD_ARG_FLAGS[@]}") + + if [ -n "${ADD_CAPABILITIES}" ]; then + BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}") + fi + + if [ "${SQUASH}" == "true" ]; then + BUILDAH_ARGS+=("--squash") + fi + + if [ "${SKIP_UNUSED_STAGES}" != "true" ]; then + BUILDAH_ARGS+=("--skip-unused-stages=false") + fi + + VOLUME_MOUNTS=() + + if [ -f "/var/workdir/cachi2/cachi2.env" ]; then + cp -r "/var/workdir/cachi2" /tmp/ + chmod -R go+rwX /tmp/cachi2 + VOLUME_MOUNTS+=(--volume /tmp/cachi2:/cachi2) + # Read in the whole file (https://unix.stackexchange.com/questions/533277), then + # for each RUN ... line insert the cachi2.env command *after* any options like --mount + sed -E -i \ + -e 'H;1h;$!d;x' \ + -e 's@^\s*(run((\s|\\\n)+-\S+)*(\s|\\\n)+)@\1. 
/cachi2/cachi2.env \&\& \\\n @igM' \ + "$dockerfile_copy" + echo "Prefetched content will be made available" + + prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo" + if [ -f "$prefetched_repo_for_my_arch" ]; then + echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED" + mkdir -p "$YUM_REPOS_D_FETCHED" + cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED" + fi + fi + + # if yum repofiles stored in git, copy them to mount point outside the source dir + if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then + mkdir -p ${YUM_REPOS_D_FETCHED} + cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED} + fi + + # if anything in the repofiles mount point (either fetched or from git), mount it + if [ -d "${YUM_REPOS_D_FETCHED}" ]; then + chmod -R go+rwX ${YUM_REPOS_D_FETCHED} + mount_point=$(realpath ${YUM_REPOS_D_FETCHED}) + VOLUME_MOUNTS+=(--volume "${mount_point}:${YUM_REPOS_D_TARGET}") + fi + + DEFAULT_LABELS=( + "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')" + "--label" "architecture=$(uname -m)" + "--label" "vcs-type=git" + ) + [ -n "$COMMIT_SHA" ] && DEFAULT_LABELS+=("--label" "vcs-ref=$COMMIT_SHA") + [ -n "$IMAGE_EXPIRES_AFTER" ] && DEFAULT_LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER") + + # Concatenate defaults and explicit labels. If a label appears twice, the last one wins. + LABELS=("${DEFAULT_LABELS[@]}" "${LABELS[@]}") + + ACTIVATION_KEY_PATH="/activation-key" + ENTITLEMENT_PATH="/entitlement" + + # 0. if hermetic=true, skip all subscription related stuff + # 1. do not enable activation key and entitlement at same time. If both vars are provided, prefer activation key. + # 2. Activation-keys will be used when the key 'org' exists in the activation key secret. + # 3. try to pre-register and mount files to the correct location so that users do no need to modify Dockerfiles. + # 3. If the Dockerfile contains the string "subcription-manager register", add the activation-keys volume + # to buildah but don't pre-register for backwards compatibility. Mount an empty directory on + # shared emptydir volume to "/etc/pki/entitlement" to prevent certificates from being included + + if [ "${HERMETIC}" != "true" ] && [ -e /activation-key/org ]; then + cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key + mkdir -p /shared/rhsm/etc/pki/entitlement + mkdir -p /shared/rhsm/etc/pki/consumer + + VOLUME_MOUNTS+=(-v /tmp/activation-key:/activation-key + -v /shared/rhsm/etc/pki/entitlement:/etc/pki/entitlement:Z + -v /shared/rhsm/etc/pki/consumer:/etc/pki/consumer:Z) + echo "Adding activation key to the build" + + if ! grep -E "^[^#]*subscription-manager.[^#]*register" "$dockerfile_path"; then + # user is not running registration in the Containerfile: pre-register. + echo "Pre-registering with subscription manager." 
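+        # the activation-key secret is expected to provide 'org' and 'activationkey'
+        # entries; both are read below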
+ subscription-manager register --org "$(cat /tmp/activation-key/org)" --activationkey "$(cat /tmp/activation-key/activationkey)" + trap 'subscription-manager unregister || true' EXIT + + # copy generated certificates to /shared volume + cp /etc/pki/entitlement/*.pem /shared/rhsm/etc/pki/entitlement + cp /etc/pki/consumer/*.pem /shared/rhsm/etc/pki/consumer + + # and then mount get /etc/rhsm/ca/redhat-uep.pem into /run/secrets/rhsm/ca + VOLUME_MOUNTS+=(--volume /etc/rhsm/ca/redhat-uep.pem:/etc/rhsm/ca/redhat-uep.pem:Z) + fi + + elif [ "${HERMETIC}" != "true" ] && find /entitlement -name "*.pem" >>null; then + cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement + VOLUME_MOUNTS+=(--volume /tmp/entitlement:/etc/pki/entitlement) + echo "Adding the entitlement to the build" + fi + + if [ -n "${ADDITIONAL_VOLUME_MOUNTS-}" ]; then + # ADDITIONAL_VOLUME_MOUNTS allows to specify more volumes for the build. + # Instrumented builds (SAST) use this step as their base and add some other tools. + while read -r volume_mount; do + VOLUME_MOUNTS+=("--volume=$volume_mount") + done <<<"$ADDITIONAL_VOLUME_MOUNTS" + fi + + ADDITIONAL_SECRET_PATH="/additional-secret" + ADDITIONAL_SECRET_TMP="/tmp/additional-secret" + if [ -d "$ADDITIONAL_SECRET_PATH" ]; then + cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP + while read -r filename; do + echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}" + BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}") + done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;) + fi + + # Prevent ShellCheck from giving a warning because 'image' is defined and 'IMAGE' is not. + declare IMAGE + + buildah_cmd_array=( + buildah build + "${VOLUME_MOUNTS[@]}" + "${BUILDAH_ARGS[@]}" + "${LABELS[@]}" + --tls-verify="$TLSVERIFY" --no-cache + --ulimit nofile=4096:4096 + -f "$dockerfile_copy" -t "$IMAGE" . + ) + buildah_cmd=$(printf "%q " "${buildah_cmd_array[@]}") + + if [ "${HERMETIC}" == "true" ]; then + # enabling loopback adapter enables Bazel builds to work in hermetic mode. + command="ip link set lo up && $buildah_cmd" + else + command="$buildah_cmd" + fi + + # disable host subcription manager integration + find /usr/share/rhel/secrets -type l -exec unlink {} \; + + unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- sh -c "$command" + + container=$(buildah from --pull-never "$IMAGE") + + # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later + if [ -f "/tmp/cachi2/output/bom.json" ]; then + echo "Making copy of sbom-cachi2.json" + cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json + fi + + buildah mount $container | tee /shared/container_path + # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners + find $(cat /shared/container_path) -xtype l -delete + echo $container >/shared/container_name + + touch /shared/base_images_digests + for image in $BASE_IMAGES; do + base_image_digest=$(buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image") + # In some cases, there might be BASE_IMAGES, but not any associated digest. 
This happens + # if buildah did not use that particular image during build because it was skipped + if [ -n "$base_image_digest" ]; then + echo "$image $base_image_digest" >>/shared/base_images_digests + fi + done + + buildah push "$IMAGE" "oci:konflux-final-image:$IMAGE" + REMOTESSHEOF + chmod +x scripts/script-build.sh + + if ! [[ $IS_LOCALHOST ]]; then + rsync -ra scripts "$SSH_HOST:$BUILD_DIR" + ssh $SSH_ARGS "$SSH_HOST" $PORT_FORWARD podman run $PODMAN_PORT_FORWARD \ + --tmpfs /run/secrets \ + -e ACTIVATION_KEY="$ACTIVATION_KEY" \ + -e ADDITIONAL_SECRET="$ADDITIONAL_SECRET" \ + -e ADD_CAPABILITIES="$ADD_CAPABILITIES" \ + -e BUILDAH_FORMAT="$BUILDAH_FORMAT" \ + -e BUILD_ARGS_FILE="$BUILD_ARGS_FILE" \ + -e CONTEXT="$CONTEXT" \ + -e ENTITLEMENT_SECRET="$ENTITLEMENT_SECRET" \ + -e HERMETIC="$HERMETIC" \ + -e IMAGE="$IMAGE" \ + -e IMAGE_EXPIRES_AFTER="$IMAGE_EXPIRES_AFTER" \ + -e SKIP_UNUSED_STAGES="$SKIP_UNUSED_STAGES" \ + -e SOURCE_CODE_DIR="$SOURCE_CODE_DIR" \ + -e SQUASH="$SQUASH" \ + -e STORAGE_DRIVER="$STORAGE_DRIVER" \ + -e TARGET_STAGE="$TARGET_STAGE" \ + -e TLSVERIFY="$TLSVERIFY" \ + -e YUM_REPOS_D_FETCHED="$YUM_REPOS_D_FETCHED" \ + -e YUM_REPOS_D_SRC="$YUM_REPOS_D_SRC" \ + -e YUM_REPOS_D_TARGET="$YUM_REPOS_D_TARGET" \ + -e COMMIT_SHA="$COMMIT_SHA" \ + -e DOCKERFILE="$DOCKERFILE" \ + -v "$BUILD_DIR/volumes/shared:/shared:Z" \ + -v "$BUILD_DIR/volumes/workdir:/var/workdir:Z" \ + -v "$BUILD_DIR/volumes/etc-pki-entitlement:/entitlement:Z" \ + -v "$BUILD_DIR/volumes/activation-key:/activation-key:Z" \ + -v "$BUILD_DIR/volumes/additional-secret:/additional-secret:Z" \ + -v "$BUILD_DIR/volumes/trusted-ca:/mnt/trusted-ca:Z" \ + -v "$BUILD_DIR/.docker/:/root/.docker:Z" \ + -v "$BUILD_DIR/results/:/tekton/results:Z" \ + -v "$BUILD_DIR/scripts:/scripts:Z" \ + --user=0 --rm "$BUILDER_IMAGE" /scripts/script-build.sh "$@" + rsync -ra "$SSH_HOST:$BUILD_DIR/volumes/shared/" /shared/ + rsync -ra "$SSH_HOST:$BUILD_DIR/volumes/workdir/" /var/workdir/ + rsync -ra "$SSH_HOST:$BUILD_DIR/results/" "/tekton/results/" + buildah pull "oci:konflux-final-image:$IMAGE" + else + bash scripts/script-build.sh "$@" + fi + buildah images + container=$(buildah from --pull-never "$IMAGE") + buildah mount "$container" | tee /shared/container_path + # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners + find $(cat /shared/container_path) -xtype l -delete + echo $container > /shared/container_name + securityContext: + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /entitlement + name: etc-pki-entitlement + - mountPath: /activation-key + name: activation-key + - mountPath: /additional-secret + name: additional-secret + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + - mountPath: /ssh + name: ssh + readOnly: true + workingDir: /var/workdir + - args: + - $(params.IMAGE) + computeResources: {} + image: quay.io/konflux-ci/icm-injection-scripts:latest@sha256:462980e94ba689b5f56c3d5dfb3358cd8c685300daf65a71532f11898935e7f1 + name: icm + securityContext: + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + workingDir: /var/workdir + - computeResources: {} + image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + name: push + script: | + #!/bin/bash + set -e + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + 
fi + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + retries=5 + # Push to a unique tag based on the TaskRun name to avoid race conditions + echo "Pushing to ${IMAGE%:*}:${TASKRUN_NAME}" + if ! buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + "$IMAGE" \ + "docker://${IMAGE%:*}:$(context.taskRun.name)"; then + echo "Failed to push sbom image to ${IMAGE%:*}:$(context.taskRun.name) after ${retries} tries" + exit 1 + fi + + # Push to a tag based on the git revision + echo "Pushing to ${IMAGE}" + if ! buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + --digestfile "/var/workdir/image-digest" "$IMAGE" \ + "docker://$IMAGE"; then + echo "Failed to push sbom image to $IMAGE after ${retries} tries" + exit 1 + fi + + cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path) + echo -n "$IMAGE" | tee $(results.IMAGE_URL.path) + { + echo -n "${IMAGE}@" + cat "/var/workdir/image-digest" + } >"$(results.IMAGE_REF.path)" + securityContext: + capabilities: + add: + - SETFCAP + runAsUser: 0 + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + workingDir: /var/workdir + - computeResources: {} + image: registry.access.redhat.com/rh-syft-tech-preview/syft-rhel9:1.4.1@sha256:34d7065427085a31dc4949bd283c001b91794d427e1e4cdf1b21ea4faf9fee3f + name: sbom-syft-generate + script: | + #!/bin/bash + set -e + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + echo "Running syft on the source directory" + syft dir:"/var/workdir/$SOURCE_CODE_DIR/$CONTEXT" --output cyclonedx-json="/var/workdir/sbom-source.json" + echo "Running syft on the image filesystem" + syft dir:"$(cat /shared/container_path)" --output cyclonedx-json="/var/workdir/sbom-image.json" + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /shared + name: shared + workingDir: /var/workdir/source + - computeResources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:9f1fd11d9c3c517ecc112d192ad361d16ecf6ce00b83b109c93cf3d1c644a357 + name: prepare-sboms + script: | + #!/bin/bash + set -e + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + echo "Merging contents of sbom-source.json and sbom-image.json into sbom-cyclonedx.json" + python3 /scripts/merge_syft_sboms.py + + if [ -f "sbom-cachi2.json" ]; then + echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json" + python3 /scripts/merge_cachi2_sboms.py sbom-cachi2.json sbom-cyclonedx.json >sbom-temp.json + mv sbom-temp.json sbom-cyclonedx.json + fi + + echo "Adding base images data to sbom-cyclonedx.json" + python3 /scripts/base_images_sbom_script.py \ + --sbom=sbom-cyclonedx.json \ + --parsed-dockerfile=/shared/parsed_dockerfile.json \ + --base-images-digests=/shared/base_images_digests + + echo "Adding image reference to sbom" + IMAGE_URL="$(cat "$(results.IMAGE_URL.path)")" + IMAGE_DIGEST="$(cat "$(results.IMAGE_DIGEST.path)")" + + python3 /scripts/add_image_reference.py \ + --image-url "$IMAGE_URL" \ + --image-digest "$IMAGE_DIGEST" \ + --input-file sbom-cyclonedx.json \ + --output-file /tmp/sbom-cyclonedx.tmp.json + + mv 
/tmp/sbom-cyclonedx.tmp.json sbom-cyclonedx.json + securityContext: + runAsUser: 0 + workingDir: /var/workdir + - computeResources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + image: quay.io/konflux-ci/appstudio-utils:48c311af02858e2422d6229600e9959e496ddef1@sha256:91ddd999271f65d8ec8487b10f3dd378f81aa894e11b9af4d10639fd52bba7e8 + name: upload-sbom + script: | + #!/bin/bash + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")" + + # Remove tag from IMAGE while allowing registry to contain a port number. + sbom_repo="${IMAGE%:*}" + sbom_digest="$(sha256sum sbom-cyclonedx.json | cut -d' ' -f1)" + # The SBOM_BLOB_URL is created by `cosign attach sbom`. + echo -n "${sbom_repo}@sha256:${sbom_digest}" | tee "$(results.SBOM_BLOB_URL.path)" + volumeMounts: + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + workingDir: /var/workdir + volumes: + - name: activation-key + secret: + optional: true + secretName: $(params.ACTIVATION_KEY) + - name: additional-secret + secret: + optional: true + secretName: $(params.ADDITIONAL_SECRET) + - name: etc-pki-entitlement + secret: + optional: true + secretName: $(params.ENTITLEMENT_SECRET) + - emptyDir: {} + name: shared + - configMap: + items: + - key: $(params.caTrustConfigMapKey) + path: ca-bundle.crt + name: $(params.caTrustConfigMapName) + optional: true + name: trusted-ca + - emptyDir: {} + name: varlibcontainers + - emptyDir: {} + name: workdir + - name: ssh + secret: + optional: false + secretName: multi-platform-ssh-$(context.taskRun.name) diff --git a/task/buildah-remote/0.3/MIGRATION.md b/task/buildah-remote/0.3/MIGRATION.md new file mode 100644 index 0000000000..3e2f4824cb --- /dev/null +++ b/task/buildah-remote/0.3/MIGRATION.md @@ -0,0 +1,15 @@ +# Migration from 0.2 to 0.3 + +Version 0.3: + +Removes references to `jvm-build-service` +* Removes `analyse-dependencies-java-sbom` step +* Removes `SBOM_JAVA_COMPONENTS_COUNT` and `JAVA_COMMUNITY_DEPENDENCIES` results + +## Action from users + +The removed items are not in use, so their removal will not impact users. + +A Renovate bot PR will be created with a warning icon for this task. + +This is expected, and no action from users is needed. \ No newline at end of file diff --git a/task/buildah-remote/0.3/README.md b/task/buildah-remote/0.3/README.md new file mode 100644 index 0000000000..cc739823cb --- /dev/null +++ b/task/buildah-remote/0.3/README.md @@ -0,0 +1,48 @@ +# buildah-remote task + +Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. +In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. +When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. 
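+
+As an illustrative sketch only (the task name comes from this repository, but the image reference and platform value below are placeholders), a Pipeline might reference this task roughly like this:
+
+```yaml
+    - name: build-container
+      taskRef:
+        name: buildah-remote
+      params:
+        - name: IMAGE
+          value: quay.io/example-org/example-app:latest
+        - name: PLATFORM
+          value: linux/arm64
+```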
+ +## Parameters +|name|description|default value|required| +|---|---|---|---| +|IMAGE|Reference of the image buildah will produce.||true| +|DOCKERFILE|Path to the Dockerfile to build.|./Dockerfile|false| +|CONTEXT|Path to the directory to use as context.|.|false| +|TLSVERIFY|Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)|true|false| +|HERMETIC|Determines if build will be executed without network access.|false|false| +|PREFETCH_INPUT|In case it is not empty, the prefetched content should be made available to the build.|""|false| +|IMAGE_EXPIRES_AFTER|Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.|""|false| +|COMMIT_SHA|The image is built from this commit.|""|false| +|YUM_REPOS_D_SRC|Path in the git repository in which yum repository files are stored|repos.d|false| +|YUM_REPOS_D_FETCHED|Path in source workspace where dynamically-fetched repos are present|fetched.repos.d|false| +|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false| +|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false| +|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false| +|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false| +|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false| +|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false| +|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false| +|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false| +|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false| +|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false| +|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false| +|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false| +|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false| +|LABELS|Additional key=value labels that should be applied to the image|[]|false| +|PLATFORM|The platform to build on||true| +|IMAGE_APPEND_PLATFORM|Whether to append a sanitized platform architecture on the IMAGE tag|false|false| + +## Results +|name|description| +|---|---| +|IMAGE_DIGEST|Digest of the image just built| +|IMAGE_URL|Image repository and tag where the built image was pushed| +|IMAGE_REF|Image reference of the built image| +|SBOM_BLOB_URL|Reference of SBOM blob digest to enable digest-based verification from provenance| + +## Workspaces +|name|description|optional| +|---|---|---| +|source|Workspace containing the source code to build.|false| diff --git a/task/buildah-remote/0.3/buildah-remote.yaml b/task/buildah-remote/0.3/buildah-remote.yaml new file mode 100644 index 0000000000..d083fb83ab --- /dev/null +++ b/task/buildah-remote/0.3/buildah-remote.yaml @@ -0,0 +1,816 @@ +apiVersion: tekton.dev/v1 +kind: Task +metadata: + annotations: + tekton.dev/pipelines.minVersion: 0.12.1 + tekton.dev/tags: image-build, konflux + 
creationTimestamp: null + labels: + app.kubernetes.io/version: 0.2.1 + build.appstudio.redhat.com/build_type: docker + name: buildah-remote +spec: + description: |- + Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. + In addition, it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. + When prefetch-dependencies task is activated it is using its artifacts to run build in hermetic environment. + params: + - description: Reference of the image buildah will produce. + name: IMAGE + type: string + - default: ./Dockerfile + description: Path to the Dockerfile to build. + name: DOCKERFILE + type: string + - default: . + description: Path to the directory to use as context. + name: CONTEXT + type: string + - default: "true" + description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS + registry) + name: TLSVERIFY + type: string + - default: "false" + description: Determines if build will be executed without network access. + name: HERMETIC + type: string + - default: "" + description: In case it is not empty, the prefetched content should be made available + to the build. + name: PREFETCH_INPUT + type: string + - default: "" + description: Delete image tag after specified time. Empty means to keep the image + tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, + respectively. + name: IMAGE_EXPIRES_AFTER + type: string + - default: "" + description: The image is built from this commit. + name: COMMIT_SHA + type: string + - default: repos.d + description: Path in the git repository in which yum repository files are stored + name: YUM_REPOS_D_SRC + - default: fetched.repos.d + description: Path in source workspace where dynamically-fetched repos are present + name: YUM_REPOS_D_FETCHED + - default: /etc/yum.repos.d + description: Target path on the container in which yum repository files should + be made available + name: YUM_REPOS_D_TARGET + - default: "" + description: Target stage in Dockerfile to build. If not specified, the Dockerfile + is processed entirely to (and including) its last stage. + name: TARGET_STAGE + type: string + - default: etc-pki-entitlement + description: Name of secret which contains the entitlement certificates + name: ENTITLEMENT_SECRET + type: string + - default: activation-key + description: Name of secret which contains subscription activation key + name: ACTIVATION_KEY + type: string + - default: does-not-exist + description: Name of a secret which will be made available to the build with 'buildah + build --secret' at /run/secrets/$ADDITIONAL_SECRET + name: ADDITIONAL_SECRET + type: string + - default: [] + description: Array of --build-arg values ("arg=value" strings) + name: BUILD_ARGS + type: array + - default: "" + description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: BUILD_ARGS_FILE + type: string + - default: trusted-ca + description: The name of the ConfigMap to read CA bundle data from. + name: caTrustConfigMapName + type: string + - default: ca-bundle.crt + description: The name of the key in the ConfigMap that contains the CA bundle + data. 
+ name: caTrustConfigMapKey + type: string + - default: "" + description: Comma separated list of extra capabilities to add when running 'buildah + build' + name: ADD_CAPABILITIES + type: string + - default: "false" + description: Squash all new and previous layers added as a part of this build, + as per --squash + name: SQUASH + type: string + - default: vfs + description: Storage driver to configure for buildah + name: STORAGE_DRIVER + type: string + - default: "true" + description: Whether to skip stages in Containerfile that seem unused by subsequent + stages + name: SKIP_UNUSED_STAGES + type: string + - default: [] + description: Additional key=value labels that should be applied to the image + name: LABELS + type: array + - description: The platform to build on + name: PLATFORM + type: string + - default: "false" + description: Whether to append a sanitized platform architecture on the IMAGE + tag + name: IMAGE_APPEND_PLATFORM + type: string + results: + - description: Digest of the image just built + name: IMAGE_DIGEST + - description: Image repository and tag where the built image was pushed + name: IMAGE_URL + - description: Image reference of the built image + name: IMAGE_REF + - description: Reference of SBOM blob digest to enable digest-based verification + from provenance + name: SBOM_BLOB_URL + type: string + stepTemplate: + computeResources: + limits: + cpu: "4" + memory: 4Gi + requests: + cpu: "1" + memory: 1Gi + env: + - name: BUILDAH_FORMAT + value: oci + - name: STORAGE_DRIVER + value: $(params.STORAGE_DRIVER) + - name: HERMETIC + value: $(params.HERMETIC) + - name: SOURCE_CODE_DIR + value: source + - name: CONTEXT + value: $(params.CONTEXT) + - name: IMAGE + value: $(params.IMAGE) + - name: TLSVERIFY + value: $(params.TLSVERIFY) + - name: IMAGE_EXPIRES_AFTER + value: $(params.IMAGE_EXPIRES_AFTER) + - name: YUM_REPOS_D_SRC + value: $(params.YUM_REPOS_D_SRC) + - name: YUM_REPOS_D_FETCHED + value: $(params.YUM_REPOS_D_FETCHED) + - name: YUM_REPOS_D_TARGET + value: $(params.YUM_REPOS_D_TARGET) + - name: TARGET_STAGE + value: $(params.TARGET_STAGE) + - name: ENTITLEMENT_SECRET + value: $(params.ENTITLEMENT_SECRET) + - name: ACTIVATION_KEY + value: $(params.ACTIVATION_KEY) + - name: ADDITIONAL_SECRET + value: $(params.ADDITIONAL_SECRET) + - name: BUILD_ARGS_FILE + value: $(params.BUILD_ARGS_FILE) + - name: ADD_CAPABILITIES + value: $(params.ADD_CAPABILITIES) + - name: SQUASH + value: $(params.SQUASH) + - name: SKIP_UNUSED_STAGES + value: $(params.SKIP_UNUSED_STAGES) + - name: BUILDER_IMAGE + value: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + - name: PLATFORM + value: $(params.PLATFORM) + - name: IMAGE_APPEND_PLATFORM + value: $(params.IMAGE_APPEND_PLATFORM) + volumeMounts: + - mountPath: /shared + name: shared + steps: + - args: + - --build-args + - $(params.BUILD_ARGS[*]) + - --labels + - $(params.LABELS[*]) + computeResources: + limits: + cpu: "4" + memory: 8Gi + requests: + cpu: "1" + memory: 2Gi + env: + - name: COMMIT_SHA + value: $(params.COMMIT_SHA) + - name: DOCKERFILE + value: $(params.DOCKERFILE) + image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + name: build + script: |- + #!/bin/bash + set -e + set -o verbose + mkdir -p ~/.ssh + if [ -e "/ssh/error" ]; then + #no server could be provisioned + cat /ssh/error + exit 1 + fi + export SSH_HOST=$(cat /ssh/host) + + if [ "$SSH_HOST" == "localhost" ] ; then + IS_LOCALHOST=true + echo 
"Localhost detected; running build in cluster" + elif [ -e "/ssh/otp" ]; then + curl --cacert /ssh/otp-ca -XPOST -d @/ssh/otp $(cat /ssh/otp-server) >~/.ssh/id_rsa + echo "" >> ~/.ssh/id_rsa + else + cp /ssh/id_rsa ~/.ssh + fi + + mkdir -p scripts + + if ! [[ $IS_LOCALHOST ]]; then + chmod 0400 ~/.ssh/id_rsa + export BUILD_DIR=$(cat /ssh/user-dir) + export SSH_ARGS="-o StrictHostKeyChecking=no -o ServerAliveInterval=60 -o ServerAliveCountMax=10" + echo "$BUILD_DIR" + ssh $SSH_ARGS "$SSH_HOST" mkdir -p "$BUILD_DIR/workspaces" "$BUILD_DIR/scripts" "$BUILD_DIR/volumes" + + PORT_FORWARD="" + PODMAN_PORT_FORWARD="" + if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] ; then + PORT_FORWARD=" -L 80:$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR:80" + PODMAN_PORT_FORWARD=" -e JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR=localhost" + fi + + rsync -ra $(workspaces.source.path)/ "$SSH_HOST:$BUILD_DIR/workspaces/source/" + rsync -ra /shared/ "$SSH_HOST:$BUILD_DIR/volumes/shared/" + rsync -ra /entitlement/ "$SSH_HOST:$BUILD_DIR/volumes/etc-pki-entitlement/" + rsync -ra /activation-key/ "$SSH_HOST:$BUILD_DIR/volumes/activation-key/" + rsync -ra /additional-secret/ "$SSH_HOST:$BUILD_DIR/volumes/additional-secret/" + rsync -ra /mnt/trusted-ca/ "$SSH_HOST:$BUILD_DIR/volumes/trusted-ca/" + rsync -ra "$HOME/.docker/" "$SSH_HOST:$BUILD_DIR/.docker/" + rsync -ra "/tekton/results/" "$SSH_HOST:$BUILD_DIR/results/" + fi + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + + cat >scripts/script-build.sh <<'REMOTESSHEOF' + #!/bin/bash + set -euo pipefail + cd $(workspaces.source.path) + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" + elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then + dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE" + elif [ -e "$DOCKERFILE" ]; then + # Instrumented builds (SAST) use this custom dockerffile step as their base + dockerfile_path="$DOCKERFILE" + elif echo "$DOCKERFILE" | grep -q "^https\?://"; then + echo "Fetch Dockerfile from $DOCKERFILE" + dockerfile_path=$(mktemp --suffix=-Dockerfile) + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE") + if [ $http_code != 200 ]; then + echo "No Dockerfile is fetched. 
Server responds $http_code" + exit 1 + fi + http_code=$(curl -s -S -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore") + if [ $http_code = 200 ]; then + echo "Fetched .dockerignore from $DOCKERFILE.dockerignore" + mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore + fi + else + echo "Cannot find Dockerfile $DOCKERFILE" + exit 1 + fi + + dockerfile_copy=$(mktemp --tmpdir "$(basename "$dockerfile_path").XXXXXX") + cp "$dockerfile_path" "$dockerfile_copy" + + # Fixing group permission on /var/lib/containers + chown root:root /var/lib/containers + + sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf + + # Setting new namespace to run buildah - 2^32-2 + echo 'root:1:4294967294' | tee -a /etc/subuid >> /etc/subgid + + build_args=() + if [ -n "${BUILD_ARGS_FILE}" ]; then + # Parse BUILD_ARGS_FILE ourselves because dockerfile-json doesn't support it + echo "Parsing ARGs from $BUILD_ARGS_FILE" + mapfile -t build_args < <( + # https://www.mankier.com/1/buildah-build#--build-arg-file + # delete lines that start with # + # delete blank lines + sed -e '/^#/d' -e '/^\s*$/d' "${SOURCE_CODE_DIR}/${BUILD_ARGS_FILE}" + ) + fi + + LABELS=() + # Split `args` into two sets of arguments. + while [[ $# -gt 0 ]]; do + case $1 in + --build-args) + shift + # Note: this may result in multiple --build-arg=KEY=value flags with the same KEY being + # passed to buildah. In that case, the *last* occurrence takes precedence. This is why + # we append BUILD_ARGS after the content of the BUILD_ARGS_FILE + while [[ $# -gt 0 && $1 != --* ]]; do build_args+=("$1"); shift; done + ;; + --labels) + shift + while [[ $# -gt 0 && $1 != --* ]]; do LABELS+=("--label" "$1"); shift; done + ;; + *) + echo "unexpected argument: $1" >&2 + exit 2 + ;; + esac + done + + BUILD_ARG_FLAGS=() + for build_arg in "${build_args[@]}"; do + BUILD_ARG_FLAGS+=("--build-arg=$build_arg") + done + + dockerfile-json "${BUILD_ARG_FLAGS[@]}" "$dockerfile_copy" > /shared/parsed_dockerfile.json + BASE_IMAGES=$( + jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName | select(test("^oci-archive:") | not)' /shared/parsed_dockerfile.json + ) + + BUILDAH_ARGS=() + UNSHARE_ARGS=() + + if [ "${HERMETIC}" == "true" ]; then + BUILDAH_ARGS+=("--pull=never") + UNSHARE_ARGS+=("--net") + + for image in $BASE_IMAGES; do + unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image + done + echo "Build will be executed with network isolation" + fi + + if [ -n "${TARGET_STAGE}" ]; then + BUILDAH_ARGS+=("--target=${TARGET_STAGE}") + fi + + BUILDAH_ARGS+=("${BUILD_ARG_FLAGS[@]}") + + if [ -n "${ADD_CAPABILITIES}" ]; then + BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}") + fi + + if [ "${SQUASH}" == "true" ]; then + BUILDAH_ARGS+=("--squash") + fi + + if [ "${SKIP_UNUSED_STAGES}" != "true" ] ; then + BUILDAH_ARGS+=("--skip-unused-stages=false") + fi + + VOLUME_MOUNTS=() + + if [ -f "$(workspaces.source.path)/cachi2/cachi2.env" ]; then + cp -r "$(workspaces.source.path)/cachi2" /tmp/ + chmod -R go+rwX /tmp/cachi2 + VOLUME_MOUNTS+=(--volume /tmp/cachi2:/cachi2) + # Read in the whole file (https://unix.stackexchange.com/questions/533277), then + # for each RUN ... line insert the cachi2.env command *after* any options like --mount + sed -E -i \ + -e 'H;1h;$!d;x' \ + -e 's@^\s*(run((\s|\\\n)+-\S+)*(\s|\\\n)+)@\1. 
/cachi2/cachi2.env \&\& \\\n @igM' \ + "$dockerfile_copy" + echo "Prefetched content will be made available" + + prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo" + if [ -f "$prefetched_repo_for_my_arch" ]; then + echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED" + mkdir -p "$YUM_REPOS_D_FETCHED" + cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED" + fi + fi + + # if yum repofiles stored in git, copy them to mount point outside the source dir + if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then + mkdir -p ${YUM_REPOS_D_FETCHED} + cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED} + fi + + # if anything in the repofiles mount point (either fetched or from git), mount it + if [ -d "${YUM_REPOS_D_FETCHED}" ]; then + chmod -R go+rwX ${YUM_REPOS_D_FETCHED} + mount_point=$(realpath ${YUM_REPOS_D_FETCHED}) + VOLUME_MOUNTS+=(--volume "${mount_point}:${YUM_REPOS_D_TARGET}") + fi + + DEFAULT_LABELS=( + "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')" + "--label" "architecture=$(uname -m)" + "--label" "vcs-type=git" + ) + [ -n "$COMMIT_SHA" ] && DEFAULT_LABELS+=("--label" "vcs-ref=$COMMIT_SHA") + [ -n "$IMAGE_EXPIRES_AFTER" ] && DEFAULT_LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER") + + # Concatenate defaults and explicit labels. If a label appears twice, the last one wins. + LABELS=("${DEFAULT_LABELS[@]}" "${LABELS[@]}") + + ACTIVATION_KEY_PATH="/activation-key" + ENTITLEMENT_PATH="/entitlement" + + # 0. if hermetic=true, skip all subscription related stuff + # 1. do not enable activation key and entitlement at same time. If both vars are provided, prefer activation key. + # 2. Activation-keys will be used when the key 'org' exists in the activation key secret. + # 3. try to pre-register and mount files to the correct location so that users do no need to modify Dockerfiles. + # 3. If the Dockerfile contains the string "subcription-manager register", add the activation-keys volume + # to buildah but don't pre-register for backwards compatibility. Mount an empty directory on + # shared emptydir volume to "/etc/pki/entitlement" to prevent certificates from being included + + if [ "${HERMETIC}" != "true" ] && [ -e /activation-key/org ]; then + cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key + mkdir -p /shared/rhsm/etc/pki/entitlement + mkdir -p /shared/rhsm/etc/pki/consumer + + VOLUME_MOUNTS+=(-v /tmp/activation-key:/activation-key \ + -v /shared/rhsm/etc/pki/entitlement:/etc/pki/entitlement:Z \ + -v /shared/rhsm/etc/pki/consumer:/etc/pki/consumer:Z) + echo "Adding activation key to the build" + + if ! grep -E "^[^#]*subscription-manager.[^#]*register" "$dockerfile_path"; then + # user is not running registration in the Containerfile: pre-register. + echo "Pre-registering with subscription manager." 
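            # The two files read below come from the Kubernetes Secret selected by the
            # ACTIVATION_KEY param (default "activation-key") and copied to
            # /tmp/activation-key above. As a hypothetical illustration (no such secret
            # is defined in this repo), a compatible secret could be created with:
            #   kubectl create secret generic activation-key \
            #     --from-literal=org=<organization-id> \
            #     --from-literal=activationkey=<activation-key-name>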
+ subscription-manager register --org "$(cat /tmp/activation-key/org)" --activationkey "$(cat /tmp/activation-key/activationkey)" + trap 'subscription-manager unregister || true' EXIT + + # copy generated certificates to /shared volume + cp /etc/pki/entitlement/*.pem /shared/rhsm/etc/pki/entitlement + cp /etc/pki/consumer/*.pem /shared/rhsm/etc/pki/consumer + + # and then mount get /etc/rhsm/ca/redhat-uep.pem into /run/secrets/rhsm/ca + VOLUME_MOUNTS+=(--volume /etc/rhsm/ca/redhat-uep.pem:/etc/rhsm/ca/redhat-uep.pem:Z) + fi + + elif [ "${HERMETIC}" != "true" ] && find /entitlement -name "*.pem" >> null; then + cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement + VOLUME_MOUNTS+=(--volume /tmp/entitlement:/etc/pki/entitlement) + echo "Adding the entitlement to the build" + fi + + if [ -n "${ADDITIONAL_VOLUME_MOUNTS-}" ]; then + # ADDITIONAL_VOLUME_MOUNTS allows to specify more volumes for the build. + # Instrumented builds (SAST) use this step as their base and add some other tools. + while read -r volume_mount; do + VOLUME_MOUNTS+=("--volume=$volume_mount") + done <<< "$ADDITIONAL_VOLUME_MOUNTS" + fi + + ADDITIONAL_SECRET_PATH="/additional-secret" + ADDITIONAL_SECRET_TMP="/tmp/additional-secret" + if [ -d "$ADDITIONAL_SECRET_PATH" ]; then + cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP + while read -r filename; do + echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}" + BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}") + done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;) + fi + + # Prevent ShellCheck from giving a warning because 'image' is defined and 'IMAGE' is not. + declare IMAGE + + buildah_cmd_array=( + buildah build + "${VOLUME_MOUNTS[@]}" + "${BUILDAH_ARGS[@]}" + "${LABELS[@]}" + --tls-verify="$TLSVERIFY" --no-cache + --ulimit nofile=4096:4096 + -f "$dockerfile_copy" -t "$IMAGE" . + ) + buildah_cmd=$(printf "%q " "${buildah_cmd_array[@]}") + + if [ "${HERMETIC}" == "true" ]; then + # enabling loopback adapter enables Bazel builds to work in hermetic mode. + command="ip link set lo up && $buildah_cmd" + else + command="$buildah_cmd" + fi + + # disable host subcription manager integration + find /usr/share/rhel/secrets -type l -exec unlink {} \; + + unshare -Uf "${UNSHARE_ARGS[@]}" --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w "${SOURCE_CODE_DIR}/$CONTEXT" -- sh -c "$command" + + container=$(buildah from --pull-never "$IMAGE") + + # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later + if [ -f "/tmp/cachi2/output/bom.json" ]; then + echo "Making copy of sbom-cachi2.json" + cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json + fi + + buildah mount $container | tee /shared/container_path + # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners + find $(cat /shared/container_path) -xtype l -delete + echo $container > /shared/container_name + + touch /shared/base_images_digests + for image in $BASE_IMAGES; do + base_image_digest=$(buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image") + # In some cases, there might be BASE_IMAGES, but not any associated digest. 
This happens + # if buildah did not use that particular image during build because it was skipped + if [ -n "$base_image_digest" ]; then + echo "$image $base_image_digest" >> /shared/base_images_digests + fi + done + + buildah push "$IMAGE" "oci:konflux-final-image:$IMAGE" + REMOTESSHEOF + chmod +x scripts/script-build.sh + + if ! [[ $IS_LOCALHOST ]]; then + rsync -ra scripts "$SSH_HOST:$BUILD_DIR" + ssh $SSH_ARGS "$SSH_HOST" $PORT_FORWARD podman run $PODMAN_PORT_FORWARD \ + --tmpfs /run/secrets \ + -e BUILDAH_FORMAT="$BUILDAH_FORMAT" \ + -e STORAGE_DRIVER="$STORAGE_DRIVER" \ + -e HERMETIC="$HERMETIC" \ + -e SOURCE_CODE_DIR="$SOURCE_CODE_DIR" \ + -e CONTEXT="$CONTEXT" \ + -e IMAGE="$IMAGE" \ + -e TLSVERIFY="$TLSVERIFY" \ + -e IMAGE_EXPIRES_AFTER="$IMAGE_EXPIRES_AFTER" \ + -e YUM_REPOS_D_SRC="$YUM_REPOS_D_SRC" \ + -e YUM_REPOS_D_FETCHED="$YUM_REPOS_D_FETCHED" \ + -e YUM_REPOS_D_TARGET="$YUM_REPOS_D_TARGET" \ + -e TARGET_STAGE="$TARGET_STAGE" \ + -e ENTITLEMENT_SECRET="$ENTITLEMENT_SECRET" \ + -e ACTIVATION_KEY="$ACTIVATION_KEY" \ + -e ADDITIONAL_SECRET="$ADDITIONAL_SECRET" \ + -e BUILD_ARGS_FILE="$BUILD_ARGS_FILE" \ + -e ADD_CAPABILITIES="$ADD_CAPABILITIES" \ + -e SQUASH="$SQUASH" \ + -e SKIP_UNUSED_STAGES="$SKIP_UNUSED_STAGES" \ + -e COMMIT_SHA="$COMMIT_SHA" \ + -e DOCKERFILE="$DOCKERFILE" \ + -v "$BUILD_DIR/workspaces/source:$(workspaces.source.path):Z" \ + -v "$BUILD_DIR/volumes/shared:/shared:Z" \ + -v "$BUILD_DIR/volumes/etc-pki-entitlement:/entitlement:Z" \ + -v "$BUILD_DIR/volumes/activation-key:/activation-key:Z" \ + -v "$BUILD_DIR/volumes/additional-secret:/additional-secret:Z" \ + -v "$BUILD_DIR/volumes/trusted-ca:/mnt/trusted-ca:Z" \ + -v "$BUILD_DIR/.docker/:/root/.docker:Z" \ + -v "$BUILD_DIR/results/:/tekton/results:Z" \ + -v "$BUILD_DIR/scripts:/scripts:Z" \ + --user=0 --rm "$BUILDER_IMAGE" /scripts/script-build.sh "$@" + rsync -ra "$SSH_HOST:$BUILD_DIR/workspaces/source/" "$(workspaces.source.path)/" + rsync -ra "$SSH_HOST:$BUILD_DIR/volumes/shared/" /shared/ + rsync -ra "$SSH_HOST:$BUILD_DIR/results/" "/tekton/results/" + buildah pull "oci:konflux-final-image:$IMAGE" + else + bash scripts/script-build.sh "$@" + fi + buildah images + container=$(buildah from --pull-never "$IMAGE") + buildah mount "$container" | tee /shared/container_path + # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners + find $(cat /shared/container_path) -xtype l -delete + echo $container > /shared/container_name + securityContext: + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /entitlement + name: etc-pki-entitlement + - mountPath: /activation-key + name: activation-key + - mountPath: /additional-secret + name: additional-secret + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + - mountPath: /ssh + name: ssh + readOnly: true + workingDir: $(workspaces.source.path) + - args: + - $(params.IMAGE) + computeResources: {} + image: quay.io/konflux-ci/icm-injection-scripts:latest@sha256:462980e94ba689b5f56c3d5dfb3358cd8c685300daf65a71532f11898935e7f1 + name: icm + securityContext: + capabilities: + add: + - SETFCAP + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + workingDir: $(workspaces.source.path) + - computeResources: {} + image: quay.io/konflux-ci/buildah-task:latest@sha256:b2d6c32d1e05e91920cd4475b2761d58bb7ee11ad5dff3ecb59831c7572b4d0c + name: push + script: | + #!/bin/bash + set -e + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + 
IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + retries=5 + # Push to a unique tag based on the TaskRun name to avoid race conditions + echo "Pushing to ${IMAGE%:*}:${TASKRUN_NAME}" + if ! buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + "$IMAGE" \ + "docker://${IMAGE%:*}:$(context.taskRun.name)"; + then + echo "Failed to push sbom image to ${IMAGE%:*}:$(context.taskRun.name) after ${retries} tries" + exit 1 + fi + + # Push to a tag based on the git revision + echo "Pushing to ${IMAGE}" + if ! buildah push \ + --retry "$retries" \ + --tls-verify="$TLSVERIFY" \ + --digestfile "$(workspaces.source.path)/image-digest" "$IMAGE" \ + "docker://$IMAGE"; + then + echo "Failed to push sbom image to $IMAGE after ${retries} tries" + exit 1 + fi + + cat "$(workspaces.source.path)"/image-digest | tee $(results.IMAGE_DIGEST.path) + echo -n "$IMAGE" | tee $(results.IMAGE_URL.path) + { + echo -n "${IMAGE}@" + cat "$(workspaces.source.path)/image-digest" + } > "$(results.IMAGE_REF.path)" + securityContext: + capabilities: + add: + - SETFCAP + runAsUser: 0 + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + workingDir: $(workspaces.source.path) + - computeResources: {} + image: registry.access.redhat.com/rh-syft-tech-preview/syft-rhel9:1.4.1@sha256:34d7065427085a31dc4949bd283c001b91794d427e1e4cdf1b21ea4faf9fee3f + name: sbom-syft-generate + script: | + #!/bin/bash + set -e + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + echo "Running syft on the source directory" + syft dir:"$(workspaces.source.path)/$SOURCE_CODE_DIR/$CONTEXT" --output cyclonedx-json="$(workspaces.source.path)/sbom-source.json" + echo "Running syft on the image filesystem" + syft dir:"$(cat /shared/container_path)" --output cyclonedx-json="$(workspaces.source.path)/sbom-image.json" + volumeMounts: + - mountPath: /var/lib/containers + name: varlibcontainers + - mountPath: /shared + name: shared + workingDir: $(workspaces.source.path)/source + - computeResources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:9f1fd11d9c3c517ecc112d192ad361d16ecf6ce00b83b109c93cf3d1c644a357 + name: prepare-sboms + script: | + #!/bin/bash + set -e + if [ "${IMAGE_APPEND_PLATFORM}" == "true" ]; then + IMAGE="${IMAGE}-${PLATFORM//[^a-zA-Z0-9]/-}" + export IMAGE + fi + echo "Merging contents of sbom-source.json and sbom-image.json into sbom-cyclonedx.json" + python3 /scripts/merge_syft_sboms.py + + if [ -f "sbom-cachi2.json" ]; then + echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json" + python3 /scripts/merge_cachi2_sboms.py sbom-cachi2.json sbom-cyclonedx.json > sbom-temp.json + mv sbom-temp.json sbom-cyclonedx.json + fi + + echo "Adding base images data to sbom-cyclonedx.json" + python3 /scripts/base_images_sbom_script.py \ + --sbom=sbom-cyclonedx.json \ + --parsed-dockerfile=/shared/parsed_dockerfile.json \ + --base-images-digests=/shared/base_images_digests + + echo "Adding image reference to sbom" + IMAGE_URL="$(cat "$(results.IMAGE_URL.path)")" + IMAGE_DIGEST="$(cat "$(results.IMAGE_DIGEST.path)")" + + python3 /scripts/add_image_reference.py 
\ + --image-url "$IMAGE_URL" \ + --image-digest "$IMAGE_DIGEST" \ + --input-file sbom-cyclonedx.json \ + --output-file /tmp/sbom-cyclonedx.tmp.json + + mv /tmp/sbom-cyclonedx.tmp.json sbom-cyclonedx.json + securityContext: + runAsUser: 0 + workingDir: $(workspaces.source.path) + - computeResources: + limits: + cpu: 200m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + image: quay.io/konflux-ci/appstudio-utils:48c311af02858e2422d6229600e9959e496ddef1@sha256:91ddd999271f65d8ec8487b10f3dd378f81aa894e11b9af4d10639fd52bba7e8 + name: upload-sbom + script: | + #!/bin/bash + + ca_bundle=/mnt/trusted-ca/ca-bundle.crt + if [ -f "$ca_bundle" ]; then + echo "INFO: Using mounted CA bundle: $ca_bundle" + cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors + update-ca-trust + fi + + cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")" + + # Remove tag from IMAGE while allowing registry to contain a port number. + sbom_repo="${IMAGE%:*}" + sbom_digest="$(sha256sum sbom-cyclonedx.json | cut -d' ' -f1)" + # The SBOM_BLOB_URL is created by `cosign attach sbom`. + echo -n "${sbom_repo}@sha256:${sbom_digest}" | tee "$(results.SBOM_BLOB_URL.path)" + volumeMounts: + - mountPath: /mnt/trusted-ca + name: trusted-ca + readOnly: true + workingDir: $(workspaces.source.path) + volumes: + - emptyDir: {} + name: varlibcontainers + - emptyDir: {} + name: shared + - name: etc-pki-entitlement + secret: + optional: true + secretName: $(params.ENTITLEMENT_SECRET) + - name: activation-key + secret: + optional: true + secretName: $(params.ACTIVATION_KEY) + - name: additional-secret + secret: + optional: true + secretName: $(params.ADDITIONAL_SECRET) + - configMap: + items: + - key: $(params.caTrustConfigMapKey) + path: ca-bundle.crt + name: $(params.caTrustConfigMapName) + optional: true + name: trusted-ca + - name: ssh + secret: + optional: false + secretName: multi-platform-ssh-$(context.taskRun.name) + workspaces: + - description: Workspace containing the source code to build. + name: source From 4c4b3c982293c95c5e3eb083b347a8b09570ff01 Mon Sep 17 00:00:00 2001 From: Tomas Nevrlka Date: Fri, 20 Dec 2024 10:46:07 +0100 Subject: [PATCH 4/5] block buildah-remote-oci-ta size increase for 0.3 - There is a check ensuring that the size buildah-remote-oci-ta doesn't increase - It only works for version 0.2, make it work for 0.3 too --- .github/workflows/temp-block-buildah.yaml | 50 +++++++++++++---------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/.github/workflows/temp-block-buildah.yaml b/.github/workflows/temp-block-buildah.yaml index 108a9339a4..8b5cc5a589 100644 --- a/.github/workflows/temp-block-buildah.yaml +++ b/.github/workflows/temp-block-buildah.yaml @@ -16,25 +16,33 @@ jobs: #!/bin/bash set -euo pipefail - buildah_remote_oci_ta=task/buildah-remote-oci-ta/0.2/buildah-remote-oci-ta.yaml - # 34172 is the largest that the file has ever been *and worked*. - # 34200 is known to be too large (see b2f800cc603ec0907ad2b3962d46919a535e158e, - # which had to be reverted). The actual limit is somewhere in between. - safe_size=34172 - current_size=$(wc -c < "$buildah_remote_oci_ta") - - if [[ "$current_size" -gt "$safe_size" ]]; then - cat << EOF >&2 - This PR increases the size of $buildah_remote_oci_ta beyond the known safe limit. - - safe_size=$safe_size - current_size=$current_size - - Due to https://github.com/tektoncd/pipeline/issues/8388, this is risky; - the resulting bundle may not be resolvable by Tekton. 
- - Until the fix for the above issue is deployed in Konflux, your PR is blocked. - Sorry! + function check_size() { + # 34172 is the largest that the file has ever been *and worked*. + # 34200 is known to be too large (see b2f800cc603ec0907ad2b3962d46919a535e158e, + # which had to be reverted). The actual limit is somewhere in between. + local file=$1 + safe_size=34172 + current_size=$(wc -c < "$file") + + if [[ "$current_size" -gt "$safe_size" ]]; then + cat << EOF >&2 + This PR increases the size of $file beyond the known safe limit. + + safe_size=$safe_size + current_size=$current_size + + Due to https://github.com/tektoncd/pipeline/issues/8388, this is risky; + the resulting bundle may not be resolvable by Tekton. + + Until the fix for the above issue is deployed in Konflux, your PR is blocked. + Sorry! EOF - exit 1 - fi + exit 1 + fi + } + + for version in 0.2 0.3; + do + buildah_remote_oci_ta="task/buildah-remote-oci-ta/${version}/buildah-remote-oci-ta.yaml" + check_size "$buildah_remote_oci_ta" + done From 782d4cfa1d4d9f3a43c4bcdce98eebaebd84af1f Mon Sep 17 00:00:00 2001 From: Tomas Nevrlka Date: Thu, 19 Dec 2024 16:45:52 +0100 Subject: [PATCH 5/5] upgrade docker-build pipelines to use buildah 0.3 - Buildah task has been updated to 0.3 - Make docker-build pipelines use the new version --- .../README.md | 28 +++++++++---------- pipelines/docker-build-oci-ta/README.md | 28 +++++++++---------- pipelines/docker-build/README.md | 28 +++++++++---------- pipelines/docker-build/patch.yaml | 2 +- pipelines/fbc-builder/README.md | 28 +++++++++---------- 5 files changed, 53 insertions(+), 61 deletions(-) diff --git a/pipelines/docker-build-multi-platform-oci-ta/README.md b/pipelines/docker-build-multi-platform-oci-ta/README.md index ecc2ac3f9f..2e646b2d1f 100644 --- a/pipelines/docker-build-multi-platform-oci-ta/README.md +++ b/pipelines/docker-build-multi-platform-oci-ta/README.md @@ -7,18 +7,18 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ## Parameters |name|description|default value|used in (taskname:taskrefversion:taskparam)| |---|---|---|---| -|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-images:0.2:BUILD_ARGS| -|build-args-file| Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-images:0.2:BUILD_ARGS_FILE| +|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-images:0.3:BUILD_ARGS| +|build-args-file| Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-images:0.3:BUILD_ARGS_FILE| |build-image-index| Add built image into an OCI image index| true| build-image-index:0.1:ALWAYS_BUILD_INDEX| |build-platforms| List of platforms to build the container images on. 
The available set of values is determined by the configuration of the multi-platform-controller.| ['linux/x86_64']| | |build-source-image| Build a source image.| false| | -|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-images:0.2:DOCKERFILE ; push-dockerfile:0.1:DOCKERFILE| +|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-images:0.3:DOCKERFILE ; push-dockerfile:0.1:DOCKERFILE| |git-url| Source Repository URL| None| clone-repository:0.1:url| -|hermetic| Execute the build with network isolation| false| build-images:0.2:HERMETIC| -|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | clone-repository:0.1:ociArtifactExpiresAfter ; prefetch-dependencies:0.1:ociArtifactExpiresAfter ; build-images:0.2:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| -|output-image| Fully Qualified Output Image| None| init:0.2:image-url ; clone-repository:0.1:ociStorage ; prefetch-dependencies:0.1:ociStorage ; build-images:0.2:IMAGE ; build-image-index:0.1:IMAGE ; build-source-image:0.1:BINARY_IMAGE| -|path-context| Path to the source code of an application's component from where to build image.| .| build-images:0.2:CONTEXT ; push-dockerfile:0.1:CONTEXT| -|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-images:0.2:PREFETCH_INPUT| +|hermetic| Execute the build with network isolation| false| build-images:0.3:HERMETIC| +|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | clone-repository:0.1:ociArtifactExpiresAfter ; prefetch-dependencies:0.1:ociArtifactExpiresAfter ; build-images:0.3:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| +|output-image| Fully Qualified Output Image| None| init:0.2:image-url ; clone-repository:0.1:ociStorage ; prefetch-dependencies:0.1:ociStorage ; build-images:0.3:IMAGE ; build-image-index:0.1:IMAGE ; build-source-image:0.1:BINARY_IMAGE| +|path-context| Path to the source code of an application's component from where to build image.| .| build-images:0.3:CONTEXT ; push-dockerfile:0.1:CONTEXT| +|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-images:0.3:PREFETCH_INPUT| |rebuild| Force rebuild image| false| init:0.2:rebuild| |revision| Revision of the Source Repository| | clone-repository:0.1:revision| |skip-checks| Skip checks against built image| false| init:0.2:skip-checks| @@ -41,7 +41,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_EXPIRES_AFTER| Delete image tag after specified time resulting in garbage collection of the digest. Empty means to keep the image tag. 
Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | '$(params.image-expires-after)'| |STORAGE_DRIVER| Storage driver to configure for buildah| vfs| | |TLSVERIFY| Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)| true| | -### buildah-remote-oci-ta:0.2 task parameters +### buildah-remote-oci-ta:0.3 task parameters |name|description|default value|already set by| |---|---|---|---| |ACTIVATION_KEY| Name of secret which contains subscription activation key| activation-key| | @@ -257,15 +257,13 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_REF| Image reference of the built image containing both the repository and the digest| | |IMAGE_URL| Image repository and tag where the built image was pushed| show-sbom:0.1:IMAGE_URL ; deprecated-base-image-check:0.4:IMAGE_URL ; clair-scan:0.2:image-url ; ecosystem-cert-preflight-checks:0.1:image-url ; sast-snyk-check:0.3:image-url ; clamav-scan:0.2:image-url ; sast-coverity-check:0.1:image-url ; coverity-availability-check:0.1:image-url ; sast-shell-check:0.1:image-url ; sast-unicode-check:0.1:image-url ; apply-tags:0.1:IMAGE ; push-dockerfile:0.1:IMAGE ; rpms-signature-scan:0.2:image-url| |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -### buildah-remote-oci-ta:0.2 task results +### buildah-remote-oci-ta:0.3 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| |IMAGE_DIGEST| Digest of the image just built| | |IMAGE_REF| Image reference of the built image| build-image-index:0.1:IMAGES| |IMAGE_URL| Image repository and tag where the built image was pushed| | -|JAVA_COMMUNITY_DEPENDENCIES| The Java dependencies that came from community sources such as Maven central.| | |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -|SBOM_JAVA_COMPONENTS_COUNT| The counting of Java components by publisher in JSON format| | ### clair-scan:0.2 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| @@ -298,7 +296,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |CHAINS-GIT_COMMIT| The precise commit SHA that was fetched by this Task. This result uses Chains type hinting to include in the provenance.| | |CHAINS-GIT_URL| The precise URL that was fetched by this Task. 
This result uses Chains type hinting to include in the provenance.| | |SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| prefetch-dependencies:0.1:SOURCE_ARTIFACT| -|commit| The precise commit SHA that was fetched by this Task.| build-images:0.2:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| +|commit| The precise commit SHA that was fetched by this Task.| build-images:0.3:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| |commit-timestamp| The commit timestamp of the checkout| | |short-commit| The commit SHA that was fetched by this Task limited to params.shortCommitLength number of characters| | |url| The precise URL that was fetched by this Task.| | @@ -309,8 +307,8 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ### prefetch-dependencies-oci-ta:0.1 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| -|CACHI2_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.| build-images:0.2:CACHI2_ARTIFACT ; build-source-image:0.1:CACHI2_ARTIFACT ; sast-snyk-check:0.3:CACHI2_ARTIFACT ; sast-coverity-check:0.1:CACHI2_ARTIFACT ; coverity-availability-check:0.1:CACHI2_ARTIFACT ; sast-shell-check:0.1:CACHI2_ARTIFACT ; sast-unicode-check:0.1:CACHI2_ARTIFACT| -|SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| build-images:0.2:SOURCE_ARTIFACT ; build-source-image:0.1:SOURCE_ARTIFACT ; sast-snyk-check:0.3:SOURCE_ARTIFACT ; sast-coverity-check:0.1:SOURCE_ARTIFACT ; coverity-availability-check:0.1:SOURCE_ARTIFACT ; sast-shell-check:0.1:SOURCE_ARTIFACT ; sast-unicode-check:0.1:SOURCE_ARTIFACT ; push-dockerfile:0.1:SOURCE_ARTIFACT| +|CACHI2_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.| build-images:0.3:CACHI2_ARTIFACT ; build-source-image:0.1:CACHI2_ARTIFACT ; sast-snyk-check:0.3:CACHI2_ARTIFACT ; sast-coverity-check:0.1:CACHI2_ARTIFACT ; coverity-availability-check:0.1:CACHI2_ARTIFACT ; sast-shell-check:0.1:CACHI2_ARTIFACT ; sast-unicode-check:0.1:CACHI2_ARTIFACT| +|SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| build-images:0.3:SOURCE_ARTIFACT ; build-source-image:0.1:SOURCE_ARTIFACT ; sast-snyk-check:0.3:SOURCE_ARTIFACT ; sast-coverity-check:0.1:SOURCE_ARTIFACT ; coverity-availability-check:0.1:SOURCE_ARTIFACT ; sast-shell-check:0.1:SOURCE_ARTIFACT ; sast-unicode-check:0.1:SOURCE_ARTIFACT ; push-dockerfile:0.1:SOURCE_ARTIFACT| ### push-dockerfile-oci-ta:0.1 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| diff --git a/pipelines/docker-build-oci-ta/README.md b/pipelines/docker-build-oci-ta/README.md index f9b3f8b35f..a639a829be 100644 --- a/pipelines/docker-build-oci-ta/README.md +++ b/pipelines/docker-build-oci-ta/README.md @@ -7,17 +7,17 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ## Parameters |name|description|default value|used in (taskname:taskrefversion:taskparam)| |---|---|---|---| -|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-container:0.2:BUILD_ARGS| -|build-args-file| Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-container:0.2:BUILD_ARGS_FILE| +|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-container:0.3:BUILD_ARGS| +|build-args-file| Path to 
a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-container:0.3:BUILD_ARGS_FILE| |build-image-index| Add built image into an OCI image index| false| build-image-index:0.1:ALWAYS_BUILD_INDEX| |build-source-image| Build a source image.| false| | -|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-container:0.2:DOCKERFILE ; push-dockerfile:0.1:DOCKERFILE| +|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-container:0.3:DOCKERFILE ; push-dockerfile:0.1:DOCKERFILE| |git-url| Source Repository URL| None| clone-repository:0.1:url| -|hermetic| Execute the build with network isolation| false| build-container:0.2:HERMETIC| -|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | clone-repository:0.1:ociArtifactExpiresAfter ; prefetch-dependencies:0.1:ociArtifactExpiresAfter ; build-container:0.2:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| -|output-image| Fully Qualified Output Image| None| init:0.2:image-url ; clone-repository:0.1:ociStorage ; prefetch-dependencies:0.1:ociStorage ; build-container:0.2:IMAGE ; build-image-index:0.1:IMAGE ; build-source-image:0.1:BINARY_IMAGE| -|path-context| Path to the source code of an application's component from where to build image.| .| build-container:0.2:CONTEXT ; push-dockerfile:0.1:CONTEXT| -|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-container:0.2:PREFETCH_INPUT| +|hermetic| Execute the build with network isolation| false| build-container:0.3:HERMETIC| +|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | clone-repository:0.1:ociArtifactExpiresAfter ; prefetch-dependencies:0.1:ociArtifactExpiresAfter ; build-container:0.3:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| +|output-image| Fully Qualified Output Image| None| init:0.2:image-url ; clone-repository:0.1:ociStorage ; prefetch-dependencies:0.1:ociStorage ; build-container:0.3:IMAGE ; build-image-index:0.1:IMAGE ; build-source-image:0.1:BINARY_IMAGE| +|path-context| Path to the source code of an application's component from where to build image.| .| build-container:0.3:CONTEXT ; push-dockerfile:0.1:CONTEXT| +|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-container:0.3:PREFETCH_INPUT| |rebuild| Force rebuild image| false| init:0.2:rebuild| |revision| Revision of the Source Repository| | clone-repository:0.1:revision| |skip-checks| Skip checks against built image| false| init:0.2:skip-checks| @@ -40,7 +40,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_EXPIRES_AFTER| Delete image tag after specified time resulting in garbage collection of the digest. Empty means to keep the image tag. 
Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | '$(params.image-expires-after)'| |STORAGE_DRIVER| Storage driver to configure for buildah| vfs| | |TLSVERIFY| Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)| true| | -### buildah-oci-ta:0.2 task parameters +### buildah-oci-ta:0.3 task parameters |name|description|default value|already set by| |---|---|---|---| |ACTIVATION_KEY| Name of secret which contains subscription activation key| activation-key| | @@ -254,15 +254,13 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_REF| Image reference of the built image containing both the repository and the digest| | |IMAGE_URL| Image repository and tag where the built image was pushed| show-sbom:0.1:IMAGE_URL ; deprecated-base-image-check:0.4:IMAGE_URL ; clair-scan:0.2:image-url ; ecosystem-cert-preflight-checks:0.1:image-url ; sast-snyk-check:0.3:image-url ; clamav-scan:0.2:image-url ; sast-coverity-check:0.1:image-url ; coverity-availability-check:0.1:image-url ; sast-shell-check:0.1:image-url ; sast-unicode-check:0.1:image-url ; apply-tags:0.1:IMAGE ; push-dockerfile:0.1:IMAGE ; rpms-signature-scan:0.2:image-url| |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -### buildah-oci-ta:0.2 task results +### buildah-oci-ta:0.3 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| |IMAGE_DIGEST| Digest of the image just built| | |IMAGE_REF| Image reference of the built image| | |IMAGE_URL| Image repository and tag where the built image was pushed| build-image-index:0.1:IMAGES| -|JAVA_COMMUNITY_DEPENDENCIES| The Java dependencies that came from community sources such as Maven central.| | |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -|SBOM_JAVA_COMPONENTS_COUNT| The counting of Java components by publisher in JSON format| | ### clair-scan:0.2 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| @@ -295,7 +293,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |CHAINS-GIT_COMMIT| The precise commit SHA that was fetched by this Task. This result uses Chains type hinting to include in the provenance.| | |CHAINS-GIT_URL| The precise URL that was fetched by this Task. 
This result uses Chains type hinting to include in the provenance.| | |SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| prefetch-dependencies:0.1:SOURCE_ARTIFACT| -|commit| The precise commit SHA that was fetched by this Task.| build-container:0.2:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| +|commit| The precise commit SHA that was fetched by this Task.| build-container:0.3:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| |commit-timestamp| The commit timestamp of the checkout| | |short-commit| The commit SHA that was fetched by this Task limited to params.shortCommitLength number of characters| | |url| The precise URL that was fetched by this Task.| | @@ -306,8 +304,8 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ### prefetch-dependencies-oci-ta:0.1 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| -|CACHI2_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.| build-container:0.2:CACHI2_ARTIFACT ; build-source-image:0.1:CACHI2_ARTIFACT ; sast-snyk-check:0.3:CACHI2_ARTIFACT ; sast-coverity-check:0.1:CACHI2_ARTIFACT ; coverity-availability-check:0.1:CACHI2_ARTIFACT ; sast-shell-check:0.1:CACHI2_ARTIFACT ; sast-unicode-check:0.1:CACHI2_ARTIFACT| -|SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| build-container:0.2:SOURCE_ARTIFACT ; build-source-image:0.1:SOURCE_ARTIFACT ; sast-snyk-check:0.3:SOURCE_ARTIFACT ; sast-coverity-check:0.1:SOURCE_ARTIFACT ; coverity-availability-check:0.1:SOURCE_ARTIFACT ; sast-shell-check:0.1:SOURCE_ARTIFACT ; sast-unicode-check:0.1:SOURCE_ARTIFACT ; push-dockerfile:0.1:SOURCE_ARTIFACT| +|CACHI2_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.| build-container:0.3:CACHI2_ARTIFACT ; build-source-image:0.1:CACHI2_ARTIFACT ; sast-snyk-check:0.3:CACHI2_ARTIFACT ; sast-coverity-check:0.1:CACHI2_ARTIFACT ; coverity-availability-check:0.1:CACHI2_ARTIFACT ; sast-shell-check:0.1:CACHI2_ARTIFACT ; sast-unicode-check:0.1:CACHI2_ARTIFACT| +|SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| build-container:0.3:SOURCE_ARTIFACT ; build-source-image:0.1:SOURCE_ARTIFACT ; sast-snyk-check:0.3:SOURCE_ARTIFACT ; sast-coverity-check:0.1:SOURCE_ARTIFACT ; coverity-availability-check:0.1:SOURCE_ARTIFACT ; sast-shell-check:0.1:SOURCE_ARTIFACT ; sast-unicode-check:0.1:SOURCE_ARTIFACT ; push-dockerfile:0.1:SOURCE_ARTIFACT| ### push-dockerfile-oci-ta:0.1 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| diff --git a/pipelines/docker-build/README.md b/pipelines/docker-build/README.md index bf5f15cf5d..329ae149d4 100644 --- a/pipelines/docker-build/README.md +++ b/pipelines/docker-build/README.md @@ -7,17 +7,17 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ## Parameters |name|description|default value|used in (taskname:taskrefversion:taskparam)| |---|---|---|---| -|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-container:0.2:BUILD_ARGS| -|build-args-file| Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-container:0.2:BUILD_ARGS_FILE| +|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-container:0.3:BUILD_ARGS| +|build-args-file| Path to a file 
with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-container:0.3:BUILD_ARGS_FILE| |build-image-index| Add built image into an OCI image index| false| build-image-index:0.1:ALWAYS_BUILD_INDEX| |build-source-image| Build a source image.| false| | -|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-container:0.2:DOCKERFILE ; push-dockerfile:0.1:DOCKERFILE| +|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-container:0.3:DOCKERFILE ; push-dockerfile:0.1:DOCKERFILE| |git-url| Source Repository URL| None| clone-repository:0.1:url| -|hermetic| Execute the build with network isolation| false| build-container:0.2:HERMETIC| -|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | build-container:0.2:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| -|output-image| Fully Qualified Output Image| None| show-summary:0.2:image-url ; init:0.2:image-url ; build-container:0.2:IMAGE ; build-image-index:0.1:IMAGE ; build-source-image:0.1:BINARY_IMAGE| -|path-context| Path to the source code of an application's component from where to build image.| .| build-container:0.2:CONTEXT ; push-dockerfile:0.1:CONTEXT| -|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-container:0.2:PREFETCH_INPUT| +|hermetic| Execute the build with network isolation| false| build-container:0.3:HERMETIC| +|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | build-container:0.3:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| +|output-image| Fully Qualified Output Image| None| show-summary:0.2:image-url ; init:0.2:image-url ; build-container:0.3:IMAGE ; build-image-index:0.1:IMAGE ; build-source-image:0.1:BINARY_IMAGE| +|path-context| Path to the source code of an application's component from where to build image.| .| build-container:0.3:CONTEXT ; push-dockerfile:0.1:CONTEXT| +|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-container:0.3:PREFETCH_INPUT| |rebuild| Force rebuild image| false| init:0.2:rebuild| |revision| Revision of the Source Repository| | clone-repository:0.1:revision| |skip-checks| Skip checks against built image| false| init:0.2:skip-checks| @@ -40,7 +40,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_EXPIRES_AFTER| Delete image tag after specified time resulting in garbage collection of the digest. Empty means to keep the image tag. 
Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | '$(params.image-expires-after)'| |STORAGE_DRIVER| Storage driver to configure for buildah| vfs| | |TLSVERIFY| Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)| true| | -### buildah:0.2 task parameters +### buildah:0.3 task parameters |name|description|default value|already set by| |---|---|---|---| |ACTIVATION_KEY| Name of secret which contains subscription activation key| activation-key| | @@ -244,15 +244,13 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_REF| Image reference of the built image containing both the repository and the digest| | |IMAGE_URL| Image repository and tag where the built image was pushed| show-sbom:0.1:IMAGE_URL ; deprecated-base-image-check:0.4:IMAGE_URL ; clair-scan:0.2:image-url ; ecosystem-cert-preflight-checks:0.1:image-url ; sast-snyk-check:0.3:image-url ; clamav-scan:0.2:image-url ; sast-coverity-check:0.1:image-url ; coverity-availability-check:0.1:image-url ; sast-shell-check:0.1:image-url ; sast-unicode-check:0.1:image-url ; apply-tags:0.1:IMAGE ; push-dockerfile:0.1:IMAGE ; rpms-signature-scan:0.2:image-url| |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -### buildah:0.2 task results +### buildah:0.3 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| |IMAGE_DIGEST| Digest of the image just built| | |IMAGE_REF| Image reference of the built image| | |IMAGE_URL| Image repository and tag where the built image was pushed| build-image-index:0.1:IMAGES| -|JAVA_COMMUNITY_DEPENDENCIES| The Java dependencies that came from community sources such as Maven central.| | |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -|SBOM_JAVA_COMPONENTS_COUNT| The counting of Java components by publisher in JSON format| | ### clair-scan:0.2 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| @@ -284,7 +282,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |---|---|---| |CHAINS-GIT_COMMIT| The precise commit SHA that was fetched by this Task. This result uses Chains type hinting to include in the provenance.| | |CHAINS-GIT_URL| The precise URL that was fetched by this Task. 
This result uses Chains type hinting to include in the provenance.| | -|commit| The precise commit SHA that was fetched by this Task.| build-container:0.2:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| +|commit| The precise commit SHA that was fetched by this Task.| build-container:0.3:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| |commit-timestamp| The commit timestamp of the checkout| | |short-commit| The commit SHA that was fetched by this Task limited to params.shortCommitLength number of characters| | |url| The precise URL that was fetched by this Task.| show-summary:0.2:git-url| @@ -331,9 +329,9 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |---|---|---|---| |git-auth| |True| clone-repository:0.1:basic-auth ; prefetch-dependencies:0.1:git-basic-auth| |netrc| |True| prefetch-dependencies:0.1:netrc| -|workspace| |False| show-summary:0.2:workspace ; clone-repository:0.1:output ; prefetch-dependencies:0.1:source ; build-container:0.2:source ; build-source-image:0.1:workspace ; sast-snyk-check:0.3:workspace ; sast-coverity-check:0.1:workspace ; coverity-availability-check:0.1:workspace ; sast-shell-check:0.1:workspace ; sast-unicode-check:0.1:workspace ; push-dockerfile:0.1:workspace| +|workspace| |False| show-summary:0.2:workspace ; clone-repository:0.1:output ; prefetch-dependencies:0.1:source ; build-container:0.3:source ; build-source-image:0.1:workspace ; sast-snyk-check:0.3:workspace ; sast-coverity-check:0.1:workspace ; coverity-availability-check:0.1:workspace ; sast-shell-check:0.1:workspace ; sast-unicode-check:0.1:workspace ; push-dockerfile:0.1:workspace| ## Available workspaces from tasks -### buildah:0.2 task workspaces +### buildah:0.3 task workspaces |name|description|optional|workspace from pipeline |---|---|---|---| |source| Workspace containing the source code to build.| False| workspace| diff --git a/pipelines/docker-build/patch.yaml b/pipelines/docker-build/patch.yaml index 81ea422c2d..9f66f12ba3 100644 --- a/pipelines/docker-build/patch.yaml +++ b/pipelines/docker-build/patch.yaml @@ -40,7 +40,7 @@ path: /spec/tasks/3/taskRef value: name: buildah - version: "0.2" + version: "0.3" - op: add path: /spec/params/- value: diff --git a/pipelines/fbc-builder/README.md b/pipelines/fbc-builder/README.md index c2510879fd..2057044844 100644 --- a/pipelines/fbc-builder/README.md +++ b/pipelines/fbc-builder/README.md @@ -7,18 +7,18 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ## Parameters |name|description|default value|used in (taskname:taskrefversion:taskparam)| |---|---|---|---| -|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-images:0.2:BUILD_ARGS| -|build-args-file| Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-images:0.2:BUILD_ARGS_FILE| +|build-args| Array of --build-arg values ("arg=value" strings) for buildah| []| build-images:0.3:BUILD_ARGS| +|build-args-file| Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file| | build-images:0.3:BUILD_ARGS_FILE| |build-image-index| Add built image into an OCI image index| true| build-image-index:0.1:ALWAYS_BUILD_INDEX| |build-platforms| List of platforms to build the container images on. 
The available set of values is determined by the configuration of the multi-platform-controller.| ['linux/x86_64']| | |build-source-image| Build a source image.| false| | -|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-images:0.2:DOCKERFILE| +|dockerfile| Path to the Dockerfile inside the context specified by parameter path-context| Dockerfile| build-images:0.3:DOCKERFILE| |git-url| Source Repository URL| None| clone-repository:0.1:url| -|hermetic| Execute the build with network isolation| true| build-images:0.2:HERMETIC| -|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | clone-repository:0.1:ociArtifactExpiresAfter ; prefetch-dependencies:0.1:ociArtifactExpiresAfter ; build-images:0.2:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| -|output-image| Fully Qualified Output Image| None| init:0.2:image-url ; clone-repository:0.1:ociStorage ; prefetch-dependencies:0.1:ociStorage ; build-images:0.2:IMAGE ; build-image-index:0.1:IMAGE| -|path-context| Path to the source code of an application's component from where to build image.| .| build-images:0.2:CONTEXT| -|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-images:0.2:PREFETCH_INPUT| +|hermetic| Execute the build with network isolation| true| build-images:0.3:HERMETIC| +|image-expires-after| Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | clone-repository:0.1:ociArtifactExpiresAfter ; prefetch-dependencies:0.1:ociArtifactExpiresAfter ; build-images:0.3:IMAGE_EXPIRES_AFTER ; build-image-index:0.1:IMAGE_EXPIRES_AFTER| +|output-image| Fully Qualified Output Image| None| init:0.2:image-url ; clone-repository:0.1:ociStorage ; prefetch-dependencies:0.1:ociStorage ; build-images:0.3:IMAGE ; build-image-index:0.1:IMAGE| +|path-context| Path to the source code of an application's component from where to build image.| .| build-images:0.3:CONTEXT| +|prefetch-input| Build dependencies to be prefetched by Cachi2| | prefetch-dependencies:0.1:input ; build-images:0.3:PREFETCH_INPUT| |rebuild| Force rebuild image| false| init:0.2:rebuild| |revision| Revision of the Source Repository| | clone-repository:0.1:revision| |skip-checks| Skip checks against built image| false| init:0.2:skip-checks| @@ -41,7 +41,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_EXPIRES_AFTER| Delete image tag after specified time resulting in garbage collection of the digest. Empty means to keep the image tag. 
Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.| | '$(params.image-expires-after)'| |STORAGE_DRIVER| Storage driver to configure for buildah| vfs| | |TLSVERIFY| Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)| true| | -### buildah-remote-oci-ta:0.2 task parameters +### buildah-remote-oci-ta:0.3 task parameters |name|description|default value|already set by| |---|---|---|---| |ACTIVATION_KEY| Name of secret which contains subscription activation key| activation-key| | @@ -152,15 +152,13 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |IMAGE_REF| Image reference of the built image containing both the repository and the digest| | |IMAGE_URL| Image repository and tag where the built image was pushed| show-sbom:0.1:IMAGE_URL ; deprecated-base-image-check:0.4:IMAGE_URL ; apply-tags:0.1:IMAGE ; validate-fbc:0.1:IMAGE_URL| |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -### buildah-remote-oci-ta:0.2 task results +### buildah-remote-oci-ta:0.3 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| |IMAGE_DIGEST| Digest of the image just built| | |IMAGE_REF| Image reference of the built image| build-image-index:0.1:IMAGES| |IMAGE_URL| Image repository and tag where the built image was pushed| | -|JAVA_COMMUNITY_DEPENDENCIES| The Java dependencies that came from community sources such as Maven central.| | |SBOM_BLOB_URL| Reference of SBOM blob digest to enable digest-based verification from provenance| | -|SBOM_JAVA_COMPONENTS_COUNT| The counting of Java components by publisher in JSON format| | ### deprecated-image-check:0.4 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| @@ -172,7 +170,7 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito |CHAINS-GIT_COMMIT| The precise commit SHA that was fetched by this Task. This result uses Chains type hinting to include in the provenance.| | |CHAINS-GIT_URL| The precise URL that was fetched by this Task. 
This result uses Chains type hinting to include in the provenance.| | |SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| prefetch-dependencies:0.1:SOURCE_ARTIFACT| -|commit| The precise commit SHA that was fetched by this Task.| build-images:0.2:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| +|commit| The precise commit SHA that was fetched by this Task.| build-images:0.3:COMMIT_SHA ; build-image-index:0.1:COMMIT_SHA| |commit-timestamp| The commit timestamp of the checkout| | |short-commit| The commit SHA that was fetched by this Task limited to params.shortCommitLength number of characters| | |url| The precise URL that was fetched by this Task.| | @@ -183,8 +181,8 @@ This pipeline is pushed as a Tekton bundle to [quay.io](https://quay.io/reposito ### prefetch-dependencies-oci-ta:0.1 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---| -|CACHI2_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.| build-images:0.2:CACHI2_ARTIFACT| -|SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| build-images:0.2:SOURCE_ARTIFACT| +|CACHI2_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.| build-images:0.3:CACHI2_ARTIFACT| +|SOURCE_ARTIFACT| The Trusted Artifact URI pointing to the artifact with the application source code.| build-images:0.3:SOURCE_ARTIFACT| ### validate-fbc:0.1 task results |name|description|used in params (taskname:taskrefversion:taskparam) |---|---|---|