refactor(RELEASE-1374): split out pulling of artifacts as a step (#779)
The push-artifacts-to-cdn-task originally did everything in one step:
pulling the artifacts via oras and then pushing them to the CDN.

This change splits the pulling out into a separate step, in preparation
for adding another step in between for signing.

An emptyDir is used for sharing data among the steps.

Signed-off-by: Martin Malina <[email protected]>
mmalina authored Jan 24, 2025
1 parent de951da commit f5b6fba
Showing 3 changed files with 121 additions and 67 deletions.
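Before the per-file diffs, here is a minimal, illustrative bash sketch (not the task code itself) of the handoff pattern this commit introduces: one step writes into a shared directory standing in for the emptyDir mount, and a later step in the same pod reads from it. The step names and the /shared/artifacts layout mirror the diff that follows.

```bash
#!/usr/bin/env bash
# Illustrative only: mimics the two task steps sharing files via an emptyDir-style mount.
set -euo pipefail

SHARED=$(mktemp -d)   # stands in for the emptyDir volume mounted at /shared in both steps

# "extract-artifacts" step: pull artifacts and place them under the shared mount
mkdir -p "$SHARED/artifacts/example-repo/FILES"
echo "disk image payload" > "$SHARED/artifacts/example-repo/FILES/example.qcow2"

# "push-images" step: a later container sees the same files and pushes them onward
find "$SHARED/artifacts" -type f   # the real task pushes these to the CDN / Dev Portal
```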
3 changes: 3 additions & 0 deletions tasks/internal/push-artifacts-to-cdn-task/README.md
@@ -14,3 +14,6 @@ Tekton task to push artifacts to CDN and optionally Dev Portal with optional sig
| udcacheSecret | Env specific secret containing the udcache credentials | No | - |
| cgwHostname | The hostname of the content-gateway to publish the metadata to | Yes | https://developers.redhat.com/content-gateway/rest/admin |
| cgwSecret | Env specific secret containing the content gateway credentials | No | - |

## Changes in 0.1.0
* Split out the extracting of the artifacts (via oras pull) to a separate step
@@ -4,7 +4,7 @@ kind: Task
metadata:
name: push-artifacts-to-cdn-task
labels:
app.kubernetes.io/version: "0.0.1"
app.kubernetes.io/version: "0.1.0"
annotations:
tekton.dev/pipelines.minVersion: "0.12.1"
tekton.dev/tags: release
@@ -40,9 +40,116 @@ spec:
results:
- name: result
description: Success if the task succeeds, the error otherwise
volumes:
- name: shared-dir
emptyDir: {}
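# shared scratch space: the extract-artifacts step writes here and push-images reads it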
steps:
- name: pull-and-push-images
- name: extract-artifacts
image: quay.io/konflux-ci/release-service-utils:7835e32b1974f956a6c942e24adbc79705cab12e
volumeMounts:
- name: shared-dir
mountPath: /shared
env:
- name: DOCKER_CONFIG_JSON
valueFrom:
secretKeyRef:
name: redhat-workloads-token
key: .dockerconfigjson
- name: "SNAPSHOT_JSON"
value: "$(params.snapshot_json)"
script: |
#!/usr/bin/env bash
set -ex
STDERR_FILE=/tmp/stderr.txt
exitfunc() {
local err=$1
local line=$2
local command="$3"
if [ "$err" -eq 0 ] ; then
echo -n "Success" > "$(results.result.path)"
else
echo "$0: ERROR '$command' failed at line $line - exited with status $err" \
> "$(results.result.path)"
if [ -f "$STDERR_FILE" ] ; then
tail -n 20 "$STDERR_FILE" >> "$(results.result.path)"
fi
fi
exit 0 # exit the script cleanly as there is no point in proceeding past an error or exit call
}
# due to set -e, this catches all EXIT and ERR calls and the task should never fail with nonzero exit code
trap 'exitfunc $? $LINENO "$BASH_COMMAND"' EXIT
mkdir -p ~/.docker
set +x
# Quotes are added to the secret so it applies in k8s nicely. But now we have to remove them
echo "$DOCKER_CONFIG_JSON" | sed -r 's/(^|\})[^{}]+(\{|$)/\1\2/g' > ~/.docker/config.json
set -x
DISK_IMAGE_DIR="/shared/artifacts"
export DISK_IMAGE_DIR
mkdir -p "$DISK_IMAGE_DIR"
process_component() { # Expected argument is [component json]
COMPONENT=$1
PULLSPEC=$(jq -er '.containerImage' <<< "${COMPONENT}")
DESTINATION="${DISK_IMAGE_DIR}/$(jq -er '.staged.destination' <<< "${COMPONENT}")/FILES" \
|| (echo "Missing staged.destination value for component. This should be an existing pulp repo. \
Failing" && exit 1)
mkdir -p "${DESTINATION}"
DOWNLOAD_DIR=$(mktemp -d)
cd "$DOWNLOAD_DIR"
# oras has very limited support for selecting the right auth entry,
# so create a custom auth file with just one entry
AUTH_FILE=$(mktemp)
select-oci-auth "${PULLSPEC}" > "$AUTH_FILE"
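# The generated file is expected to hold only the auth entry matching the
# registry of $PULLSPEC (a one-entry dockerconfig-style JSON).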
oras pull --registry-config "$AUTH_FILE" "$PULLSPEC"
NUM_MAPPED_FILES=$(jq '.staged.files | length' <<< "${COMPONENT}")
for ((i = 0; i < NUM_MAPPED_FILES; i++)) ; do
FILE=$(jq -c --arg i "$i" '.staged.files[$i|tonumber]' <<< "$COMPONENT")
SOURCE=$(jq -er '.source' <<< "$FILE")
FILENAME=$(jq -er '.filename' <<< "$FILE")
# The .qcow2 images are not zipped
if [ -f "${SOURCE}.gz" ] ; then
gzip -d "${SOURCE}.gz"
fi
DESTINATION_FILE="${DESTINATION}/${FILENAME}"
# Albeit a low probability, a race condition can occur since this is run in parallel.
# The race condition is if two files have the same $DESTINATION_FILE and both
# if checks are run before either mv is run a few lines below.
if [ -f "${DESTINATION_FILE}" ] ; then
echo -n "Multiple files use the same destination value: $DESTINATION" >&2
echo " and filename value: $FILENAME. Failing..." >&2
exit 1
fi
mv "$SOURCE" "${DESTINATION_FILE}" || echo "didn't find mapped file: ${SOURCE}"
done
}
RUNNING_JOBS="\j" # Bash parameter for number of jobs currently running
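# "${RUNNING_JOBS@P}" below applies bash prompt-string expansion to "\j",
# which evaluates to the number of background jobs the shell is currently running.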
NUM_COMPONENTS=$(jq '.components | length' <<< "$SNAPSHOT_JSON")
# Pull each component in parallel
for ((i = 0; i < NUM_COMPONENTS; i++)) ; do
COMPONENT=$(jq -c --arg i "$i" '.components[$i|tonumber]' <<< "$SNAPSHOT_JSON")
# Limit batch size to concurrent limit
while (( ${RUNNING_JOBS@P} >= $(params.concurrentLimit) )); do
wait -n
done
process_component "$COMPONENT" 2> "$STDERR_FILE" &
done
# Wait for remaining processes to finish
while (( ${RUNNING_JOBS@P} > 0 )); do
wait -n
done
- name: push-images
image: quay.io/konflux-ci/release-service-utils:7835e32b1974f956a6c942e24adbc79705cab12e
volumeMounts:
- name: shared-dir
mountPath: /shared
env:
- name: EXODUS_CERT
valueFrom:
@@ -89,11 +196,6 @@ spec:
secretKeyRef:
name: $(params.udcacheSecret)
key: key
- name: DOCKER_CONFIG_JSON
valueFrom:
secretKeyRef:
name: redhat-workloads-token
key: .dockerconfigjson
- name: "SNAPSHOT_JSON"
value: "$(params.snapshot_json)"
- name: CGW_USERNAME
@@ -112,6 +214,12 @@ spec:
STDERR_FILE=/tmp/stderr.txt
# Check if the previous step finished successfully. If not, stop here.
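# (the result file lives under /tekton/results, which is shared by all step
# containers in this Task's pod, so the extract-artifacts step's outcome is readable here)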
if [ "$(cat "$(results.result.path)")" != "Success" ]; then
echo "Previous step failed. Exiting..."
exit 0
fi
exitfunc() {
local err=$1
local line=$2
@@ -142,7 +250,6 @@ spec:
export EXODUS_GW_URL="$EXODUS_URL"
export EXODUS_PULP_HOOK_ENABLED=True
export EXODUS_GW_TIMEOUT=7200
mkdir -p ~/.docker
set +x
echo "$EXODUS_CERT" > "$EXODUS_GW_CERT"
@@ -151,50 +258,11 @@ spec:
echo "$PULP_KEY" > "$PULP_KEY_FILE"
echo "$UDC_CERT" > "$UDCACHE_CERT"
echo "$UDC_KEY" > "$UDCACHE_KEY"
# Quotes are added to the secret so it applies in k8s nicely. But now we have to remove them
echo "$DOCKER_CONFIG_JSON" | sed -r 's/(^|\})[^{}]+(\{|$)/\1\2/g' > ~/.docker/config.json
set -x
DISK_IMAGE_DIR="$(mktemp -d)"
DISK_IMAGE_DIR="/shared/artifacts"
export DISK_IMAGE_DIR
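# The artifacts were already extracted here by the extract-artifacts step;
# this step only pushes what it finds under /shared/artifacts.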
process_component() { # Expected argument is [component json]
COMPONENT=$1
PULLSPEC=$(jq -er '.containerImage' <<< "${COMPONENT}")
DESTINATION="${DISK_IMAGE_DIR}/$(jq -er '.staged.destination' <<< "${COMPONENT}")/FILES" \
|| (echo "Missing staged.destination value for component. This should be an existing pulp repo. \
Failing" && exit 1)
mkdir -p "${DESTINATION}"
DOWNLOAD_DIR=$(mktemp -d)
cd "$DOWNLOAD_DIR"
# oras has very limited support for selecting the right auth entry,
# so create a custom auth file with just one entry
AUTH_FILE=$(mktemp)
select-oci-auth "${PULLSPEC}" > "$AUTH_FILE"
oras pull --registry-config "$AUTH_FILE" "$PULLSPEC"
NUM_MAPPED_FILES=$(jq '.staged.files | length' <<< "${COMPONENT}")
for ((i = 0; i < NUM_MAPPED_FILES; i++)) ; do
FILE=$(jq -c --arg i "$i" '.staged.files[$i|tonumber]' <<< "$COMPONENT")
SOURCE=$(jq -er '.source' <<< "$FILE")
FILENAME=$(jq -er '.filename' <<< "$FILE")
# The .qcow2 images are not zipped
if [ -f "${SOURCE}.gz" ] ; then
gzip -d "${SOURCE}.gz"
fi
DESTINATION_FILE="${DESTINATION}/${FILENAME}"
# Albeit a rare one, this is a race condition since this is run in parallel.
# The race condition is if two files have the same $DESTINATION_FILE and both
# if checks are run before either mv is run a few lines below.
if [ -f "${DESTINATION_FILE}" ] ; then
echo -n "Multiple files use the same destination value: $DESTINATION" >&2
echo " and filename value: $FILENAME. Failing..." >&2
exit 1
fi
mv "$SOURCE" "${DESTINATION_FILE}" || echo "didn't find mapped file: ${SOURCE}"
done
}
process_component_for_developer_portal() { # Expected argument are [component json], [content_directory]
COMPONENT=$1
@@ -230,31 +298,13 @@ spec:
}
RUNNING_JOBS="\j" # Bash parameter for number of jobs currently running
NUM_COMPONENTS=$(jq '.components | length' <<< "$SNAPSHOT_JSON")
# use the 1st component's version
VERSION=$(jq -cr '.components[0].staged.version // ""' <<< "$SNAPSHOT_JSON")
if [ "${VERSION}" == "" ] ; then
echo "Error: version not specified in .components[0].staged.version. Needed to publish to customer portal"
exit 1
fi
# Process each component in parallel
for ((i = 0; i < NUM_COMPONENTS; i++)) ; do
COMPONENT=$(jq -c --arg i "$i" '.components[$i|tonumber]' <<< "$SNAPSHOT_JSON")
# Limit batch size to concurrent limit
while (( ${RUNNING_JOBS@P} >= $(params.concurrentLimit) )); do
wait -n
done
process_component "$COMPONENT" 2> "$STDERR_FILE" &
done
# Wait for remaining processes to finish
while (( ${RUNNING_JOBS@P} > 0 )); do
wait -n
done
# Change to the subdir with the images
cd "${DISK_IMAGE_DIR}"
@@ -266,7 +316,7 @@ spec:
while IFS= read -r -d '' file ; do
STAGED_JSON=$(jq --arg filename "$(basename "$file")" --arg path "$file" \
--arg version "$VERSION" \
'.payload.files[.payload.files | length] =
'.payload.files[.payload.files | length] =
{"filename": $filename, "relative_path": $path, "version": $version}' <<< "$STAGED_JSON")
done < <(find * -type f -print0)
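# Each appended entry looks roughly like (illustrative values):
#   {"filename": "example.qcow2", "relative_path": "example-repo/FILES/example.qcow2", "version": "1.0"}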
@@ -5,6 +5,7 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )

# Add mocks to the beginning of task step script
yq -i '.spec.steps[0].script = load_str("'$SCRIPT_DIR'/mocks.sh") + .spec.steps[0].script' "$TASK_PATH"
yq -i '.spec.steps[1].script = load_str("'$SCRIPT_DIR'/mocks.sh") + .spec.steps[1].script' "$TASK_PATH"
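# The task now has two scripted steps (extract-artifacts and push-images),
# so the mocks are prepended to both.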

# Create a dummy exodus secret (and delete it first if it exists)
kubectl delete secret pulp-task-exodus-secret --ignore-not-found
