name: Short benchmarks (frame-omni-bencher)

on:
  push:
    branches:
      - master
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review, labeled]
  merge_group:
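
# Keep only the newest run per PR (or per ref) and cancel superseded in-progress runs.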
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

env:
  ARTIFACTS_NAME: frame-omni-bencher-artifacts

jobs:
  changes:
    # TODO: remove once migration is complete or this workflow is fully stable
    if: contains(github.event.label.name, 'GHA-migration')
    permissions:
      pull-requests: read
    uses: ./.github/workflows/reusable-check-changed-files.yml
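
  # The reusable check-changed-files workflow is assumed to expose a `rust` output;
  # the benchmark jobs below only run when it reports Rust-related changes.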
  set-image:
    # GitHub Actions allows using 'env' in a container context.
    # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322
    # This workaround sets the container image for each job using 'set-image' job output.
    runs-on: ubuntu-latest
    outputs:
      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
      RUNNER: ${{ steps.set_runner.outputs.RUNNER }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - id: set_image
        run: cat .github/env >> $GITHUB_OUTPUT
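      # .github/env is expected to contain KEY=value lines (including the CI IMAGE reference);
      # appending it to $GITHUB_OUTPUT turns each line into a step output exposed by the job outputs above.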
      - id: set_runner
        run: |
          # Run merge queues on persistent runners
          if [[ $GITHUB_REF_NAME == *"gh-readonly-queue"* ]]; then
            echo "RUNNER=arc-runners-polkadot-sdk-beefy-persistent" >> $GITHUB_OUTPUT
          else
            echo "RUNNER=arc-runners-polkadot-sdk-beefy" >> $GITHUB_OUTPUT
          fi

  quick-benchmarks-omni:
    runs-on: ${{ needs.set-image.outputs.RUNNER }}
    needs: [set-image, changes]
    if: ${{ needs.changes.outputs.rust }}
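    # Debug assertions are enabled for both the native and the Wasm build so that
    # `debug_assert!` checks are exercised during the quick benchmark run.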
    env:
      RUSTFLAGS: "-C debug-assertions"
      RUST_BACKTRACE: "full"
      WASM_BUILD_NO_COLOR: 1
      WASM_BUILD_RUSTFLAGS: "-C debug-assertions"
    timeout-minutes: 30
    container:
      image: ${{ needs.set-image.outputs.IMAGE }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: script
        run: |
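          # forklift is assumed to be the build-cache wrapper available in the CI image;
          # the same check should reproduce locally with plain `cargo build` / `cargo run`.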
          forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks
          forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet

  run-frame-omni-bencher:
    runs-on: ${{ needs.set-image.outputs.RUNNER }}
    needs: [set-image, changes] # , build-frame-omni-bencher ]
    if: ${{ needs.changes.outputs.rust }}
    timeout-minutes: 30
    strategy:
      fail-fast: false # keep running the other matrix jobs even if one fails, to see the logs of all possible failures
      matrix:
        runtime:
          [
            westend-runtime,
            rococo-runtime,
            asset-hub-rococo-runtime,
            asset-hub-westend-runtime,
            bridge-hub-rococo-runtime,
            bridge-hub-westend-runtime,
            collectives-westend-runtime,
            coretime-rococo-runtime,
            coretime-westend-runtime,
            people-rococo-runtime,
            people-westend-runtime,
            glutton-westend-runtime,
          ]
    container:
      image: ${{ needs.set-image.outputs.IMAGE }}
    env:
      PACKAGE_NAME: ${{ matrix.runtime }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: script
        run: |
          RUNTIME_BLOB_NAME=$(echo $PACKAGE_NAME | sed 's/-/_/g').compact.compressed.wasm
          RUNTIME_BLOB_PATH=./target/release/wbuild/$PACKAGE_NAME/$RUNTIME_BLOB_NAME
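          # e.g. for westend-runtime this resolves to
          # ./target/release/wbuild/westend-runtime/westend_runtime.compact.compressed.wasm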
          forklift cargo build --release --locked -p $PACKAGE_NAME -p frame-omni-bencher --features runtime-benchmarks
          echo "Running short benchmarking for PACKAGE_NAME=$PACKAGE_NAME and RUNTIME_BLOB_PATH=$RUNTIME_BLOB_PATH"
          ls -lrt $RUNTIME_BLOB_PATH
          ./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1

  confirm-frame-omni-benchers-passed:
    runs-on: ubuntu-latest
    name: All benchmarks passed
    needs: run-frame-omni-bencher
    if: always() && !cancelled()
    steps:
      - run: |
          tee resultfile <<< '${{ toJSON(needs) }}'
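          # `needs` serializes to one JSON entry per upstream job, each carrying a "result"
          # of success/failure/cancelled/skipped; fail this job if any of them failed.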
          FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l)
          if [ $FAILURES -gt 0 ]; then
            echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY
            exit 1
          else
            echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY
          fi