feat(SPSTRAT-465): add task for marketplacesvm
This commit introduces a new task named `marketplaces-push-disk-images`, which will be used to deliver disk images to various cloud marketplaces using the `marketplacesvm_push_wrapper`.

Signed-off-by: Jonathan Gangi <[email protected]>
Showing 5 changed files with 399 additions and 0 deletions.
14 changes: 14 additions & 0 deletions
tasks/marketplacesvm-push-disk-images-task/README.md
@@ -0,0 +1,14 @@
# marketplaces-push-disk-images

Tekton Task to publish VM disk images into various cloud marketplaces using `pubtools-marketplacesvm`.

It currently supports images in `raw` and `vhd` formats for `AWS` and `Azure` respectively.

## Parameters

| Name                    | Description                                                                           | Optional | Default value |
| ----------------------- | ------------------------------------------------------------------------------------- | -------- | ------------- |
| snapshotPath            | Path to the JSON string of the mapped snapshot spec in the data workspace.            | No       | -             |
| dataPath                | Path to the data JSON in the workspace containing the marketplacesvm options to use.  | No       | -             |
| cloudMarketplacesSecret | Env specific secret containing the marketplaces credentials.                          | No       | -             |
| concurrentLimit         | The maximum number of images to be pulled at once.                                    | Yes      | 3             |
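
For orientation, here is a minimal sketch of the file that `dataPath` points at. The keys `productInfo.productCode`, `productInfo.productVersionName` and `starmapMapping` are the ones the task script actually reads; the file name and all values below are made-up examples, and the real `starmapMapping` schema is whatever `pubtools-marketplacesvm` expects (not shown here).

```bash
# Illustrative only: key names match what the task reads; values are invented.
cat > data.json <<'EOF'
{
  "productInfo": {
    "productCode": "sample-product",
    "productVersionName": "1.0"
  },
  "starmapMapping": {}
}
EOF
```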
168 changes: 168 additions & 0 deletions
tasks/marketplacesvm-push-disk-images-task/marketplaces-push-disk-images-task.yaml
@@ -0,0 +1,168 @@
---
apiVersion: tekton.dev/v1
kind: Task
metadata:
  name: marketplaces-push-disk-images
  labels:
    app.kubernetes.io/version: "0.1.0"
  annotations:
    tekton.dev/pipelines.minVersion: "0.12.1"
    tekton.dev/tags: release
spec:
  description: >-
    Tekton task to push disk images to Cloud Marketplaces
  params:
    - name: snapshotPath
      type: string
      description: |
        Path to the JSON string of the mapped snapshot spec in the data workspace.
        It must be processed by the "apply-mapping" task first.
    - name: dataPath
      description: Path to the JSON string of the merged data to use in the data workspace
    - name: cloudMarketplacesSecret
      type: string
      description: Env specific secret containing the marketplaces credentials.
    - name: concurrentLimit
      type: string
      description: The maximum number of images to be pulled at once.
      default: "3"
  workspaces:
    - name: data
      description: Workspace where the snapshot spec and data JSON files reside
  results:
    - name: result
      description: Success if the task succeeds, the error otherwise
  steps:
    - name: pull-and-push-images-to-marketplaces
      image: quay.io/konflux-ci/release-service-utils:7f7a156a835c773bbcd7e5d7e44df2f573db14f2
      env:
        - name: CLOUD_CREDENTIALS
          valueFrom:
            secretKeyRef:
              name: $(params.cloudMarketplacesSecret)
              key: key
      script: |
        #!/usr/bin/env bash
        set -ex
        STDERR_FILE=/tmp/stderr.txt

        exitfunc() {
          local err=$1
          local line=$2
          local command="$3"
          if [ "$err" -eq 0 ] ; then
            echo -n "Success" > "$(results.result.path)"
          else
            echo "$0: ERROR '$command' failed at line $line - exited with status $err" \
              > "$(results.result.path)"
            if [ -f "$STDERR_FILE" ] ; then
              tail -n 20 "$STDERR_FILE" >> "$(results.result.path)"
            fi
          fi
          exit 0 # exit the script cleanly as there is no point in proceeding past an error or exit call
        }
        # due to set -e, this catches all EXIT and ERR calls and the task should never fail with nonzero exit code
        trap 'exitfunc $? $LINENO "$BASH_COMMAND"' EXIT

        # Setup required variables
        set +x
        SNAPSHOT_JSON=$(jq -c '.' "$(workspaces.data.path)/$(params.snapshotPath)")
        PRODUCT_INFO=$(jq -c '.productInfo' "$(workspaces.data.path)/$(params.dataPath)")
        STARMAP_MAPPING=$(jq -c '.starmapMapping' "$(workspaces.data.path)/$(params.dataPath)")
        STARMAP_MAPPING_FILE=$(workspaces.data.path)/starmap.yaml
        yq -p json -o yaml <<< "$STARMAP_MAPPING" > "$STARMAP_MAPPING_FILE"
        set -x

        BASE_DIR="$(mktemp -d)"
        DISK_IMGS_DIR="${BASE_DIR}/starmap/CLOUD_IMAGES"
        mkdir -p "${DISK_IMGS_DIR}"

        RUNNING_JOBS="\j" # Bash parameter for number of jobs currently running
        NUM_COMPONENTS=$(jq '.components | length' <<< "$SNAPSHOT_JSON")
        process_component() { # Expected argument is [component json]
          COMPONENT=$1
          PULLSPEC=$(jq -er '.containerImage' <<< "${COMPONENT}")
          IMG_NAME=$(jq -er '.name' <<< "${COMPONENT}")
          BUILD_NAME=$(jq -er '.productCode' <<< "${PRODUCT_INFO}")
          BUILD_VERSION=$(jq -er '.productVersionName' <<< "${PRODUCT_INFO}")
          BUILD_ARCH=$(jq -er '.staged.files[0].filename' <<< "${COMPONENT}")
          BUILD_ARCH=${BUILD_ARCH%.*}   # Strip everything after the last "." to remove the extension
          BUILD_ARCH=${BUILD_ARCH##*-}  # Keep everything after the last "-" to get the arch

          RESOURCES_JSON='
          {
            "api": "v1",
            "resource": "CloudImage",
            "description": "",
            "boot_mode": "hybrid",
            "build": {},
            "images": []
          }'
          RESOURCES_JSON=$(jq -c \
            --arg build_name "$BUILD_NAME" \
            --arg build_arch "$BUILD_ARCH" \
            --arg build_version "$BUILD_VERSION" \
            '.build.name=$build_name |
             .build.arch=$build_arch |
             .build.version=$build_version' <<< "$RESOURCES_JSON")

          DESTINATION="${DISK_IMGS_DIR}/${IMG_NAME}"
          mkdir -p "${DESTINATION}"
          DOWNLOAD_DIR=$(mktemp -d)
          cd "$DOWNLOAD_DIR"

          # oras has very limited support for selecting the right auth entry,
          # so create a custom auth file with just one entry
          AUTH_FILE=$(mktemp)
          select-oci-auth "${PULLSPEC}" > "$AUTH_FILE"
          oras pull --registry-config "$AUTH_FILE" "$PULLSPEC"

          NUM_MAPPED_FILES=$(jq '.staged.files | length' <<< "${COMPONENT}")
          for ((i = 0; i < NUM_MAPPED_FILES; i++)); do
            FILE=$(jq -c --arg i "$i" '.staged.files[$i|tonumber]' <<< "$COMPONENT")
            SOURCE=$(jq -er '.source' <<< "$FILE")
            FILENAME=$(jq -er '.filename' <<< "$FILE")
            DESTINATION_FILE="${DESTINATION}/${FILENAME}"
            if [ -f "${SOURCE}.gz" ]; then
              gzip -d "${SOURCE}.gz"
            fi
            if [ -f "${DESTINATION_FILE}" ]; then
              echo -n "Multiple files use the same destination value: $DESTINATION" >&2
              echo " and filename value: $FILENAME. Failing..." >&2
              exit 1
            fi
            if [ "${FILENAME##*.}" = "vhd" ]; then
              image_type="VHD"
            elif [ "${FILENAME##*.}" = "raw" ]; then
              image_type="AMI"
            else
              continue
            fi
            mv "$SOURCE" "${DESTINATION_FILE}" || echo "didn't find mapped file: ${SOURCE}"
            RESOURCES_JSON=$(jq --arg filename "$FILENAME" --arg arch "$BUILD_ARCH" \
              '.images[.images | length] = {"path": $filename, "architecture": $arch}' <<< "$RESOURCES_JSON")
            RESOURCES_JSON=$(jq --arg image_type "$image_type" \
              '.type = $image_type' <<< "$RESOURCES_JSON")
          done
          echo "$RESOURCES_JSON" | yq -P -I 4 > "$DESTINATION/resources.yaml"
        }
        # Process each component in parallel
        for ((i = 0; i < NUM_COMPONENTS; i++)); do
          COMPONENT=$(jq -c --arg i "$i" '.components[$i|tonumber]' <<< "$SNAPSHOT_JSON")
          # Limit batch size to concurrent limit.
          # ${RUNNING_JOBS@P} prompt-expands "\j" to the number of background jobs currently running.
          while (( ${RUNNING_JOBS@P} >= $(params.concurrentLimit) )); do
            wait -n
          done
          process_component "$COMPONENT" 2> "$STDERR_FILE" &
        done

        # Wait for remaining processes to finish
        while (( ${RUNNING_JOBS@P} > 0 )); do
          wait -n
        done

        # Change to the base directory
        cd "${BASE_DIR}"

        # Validate the staged structure using pushsource-ls
        pushsource-ls "staged:${BASE_DIR}" 2> "$STDERR_FILE"

        # Process the push
        marketplacesvm_push_wrapper \
          --debug --source "${BASE_DIR}" \
          --starmap-file "$STARMAP_MAPPING_FILE" 2> "$STDERR_FILE"
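
To make the staging step easier to follow, here is a rough sketch of the directory layout the script builds under `BASE_DIR` before calling `pushsource-ls` and `marketplacesvm_push_wrapper`. The component and image names are invented; the `resources.yaml` content simply mirrors the `RESOURCES_JSON` fields assembled above.

```bash
# Hypothetical example of the staged tree (names are invented):
#
#   $BASE_DIR/starmap/CLOUD_IMAGES/
#   └── my-component/              # one directory per snapshot component
#       ├── disk-image-x86_64.raw  # image file moved from the oras pull
#       └── resources.yaml         # rendered from RESOURCES_JSON with `yq -P -I 4`
#
# and roughly what that resources.yaml would contain:
cat <<'EOF'
api: v1
resource: CloudImage
description: ""
boot_mode: hybrid
build:
    name: sample-product
    arch: x86_64
    version: "1.0"
images:
    - path: disk-image-x86_64.raw
      architecture: x86_64
type: AMI
EOF
```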
31 changes: 31 additions & 0 deletions
tasks/marketplacesvm-push-disk-images-task/tests/mocks.sh
@@ -0,0 +1,31 @@
#!/usr/bin/env bash
set -ex

# mocks to be injected into task step scripts
function select-oci-auth() {
  echo Mock select-oci-auth called with: $*
  echo $* > "$(workspaces.data.path)/mock_select-oci-auth.txt"
}

function oras() {
  echo Mock oras called with: $*
  echo $* > "$(workspaces.data.path)/mock_oras.txt"

  if [[ "$*" != "pull --registry-config"* ]]; then
    echo Error: Unexpected call to oras
    exit 1
  fi
}

function marketplacesvm_push_wrapper() {
  echo Mock marketplacesvm_push_wrapper called with: $*
  echo $* > "$(workspaces.data.path)/mock_wrapper.txt"

  /home/pubtools-marketplacesvm-wrapper/marketplacesvm_push_wrapper "$@" --dry-run

  if ! [[ "$?" -eq 0 ]]; then
    echo Unexpected call to marketplacesvm_push_wrapper
    exit 1
  fi
}
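
As a side note on how these mocks take effect: in bash, a function definition with the same name as an external command wins the lookup, so once the test hook prepends this file to the task step script, calls to `select-oci-auth`, `oras` and `marketplacesvm_push_wrapper` resolve to the functions above instead of the real binaries in the image. A tiny standalone illustration:

```bash
# Function definitions shadow same-named external commands in bash.
function oras() { echo "mock oras called with: $*"; }

oras pull quay.io/example/image:latest
# prints: mock oras called with: pull quay.io/example/image:latest
```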
11 changes: 11 additions & 0 deletions
tasks/marketplacesvm-push-disk-images-task/tests/pre-apply-task-hook.sh
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

TASK_PATH="$1"
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

# Add mocks to the beginning of task step script
yq -i '.spec.steps[0].script = load_str("'$SCRIPT_DIR'/mocks.sh") + .spec.steps[0].script' "$TASK_PATH"

# Create a dummy secret (and delete it first if it exists)
kubectl delete secret test-secret --ignore-not-found
kubectl create secret generic test-secret --from-literal=key=eyJ0ZXN0Ijoic2VjcmV0In0K
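
The dummy value is just base64 of a throwaway JSON document; the task reads it back through the `CLOUD_CREDENTIALS` env var (secret key `key`). A real secret would presumably be created the same way from whatever credentials file `marketplacesvm_push_wrapper` expects; the secret and file names below are placeholders.

```bash
# Decode the dummy test value:
echo "eyJ0ZXN0Ijoic2VjcmV0In0K" | base64 -d   # -> {"test":"secret"}

# Placeholder example of a real secret (names and credentials format are assumptions):
kubectl create secret generic my-marketplaces-secret \
  --from-literal=key="$(base64 -w0 < marketplaces-credentials.json)"
```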