Bump the pip group with 4 updates #48

Workflow file for this run

name: Benchmark Tests
on:
  issue_comment:
    types: [created, edited]
jobs:
  test:
    name: Execute benchmark tests
    if: >
      (
        github.event.issue.author_association == 'OWNER' ||
        github.event.issue.author_association == 'COLLABORATOR' ||
        github.event.issue.author_association == 'MEMBER'
      ) && github.event.issue.pull_request && contains(github.event.comment.body, 'please run benchmarks')
    runs-on: ubuntu-22.04
    env:
      BENCHMARK_NUMBER_SAMPLES: 100
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Need to fetch enough nodes to get the common ancestor - but don't want to fetch everything
          fetch-depth: 100
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - uses: iterative/setup-cml@v3
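      # Resolve the PR head, its base, and their merge-base so the benchmark can compare
      # the challenger branch against the point where it diverged from its target branch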
      - name: Get PR Info
        id: pr
        env:
          PR_NUMBER: ${{ github.event.issue.number }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_REPO: ${{ github.repository }}
          COMMENT_AT: ${{ github.event.comment.created_at }}
        run: |
          pr="$(gh api /repos/${GH_REPO}/pulls/${PR_NUMBER})"
          head_sha="$(echo "$pr" | jq -r .head.sha)"
          base_sha="$(echo "$pr" | jq -r .base.sha)"
          root_sha="$(git merge-base "${head_sha}" "${base_sha}")"
          updated_at="$(echo "$pr" | jq -r .updated_at)"
          # Abort if the PR was updated after the triggering comment was posted
          if [[ $(date -d "$updated_at" +%s) -gt $(date -d "$COMMENT_AT" +%s) ]]; then
            exit 1
          fi
          echo "OLD_REF_SHA=$root_sha" >> $GITHUB_ENV
          echo "NEW_REF_SHA=$head_sha" >> $GITHUB_ENV
      # First run the benchmark on the old reference
      - name: Checkout old reference
        run: |
          echo Checking out ${OLD_REF_SHA}...
          git checkout ${OLD_REF_SHA}
      - name: Install dependencies
        run: |
          bash ./scripts/ci_install.sh
          # Build dev-mode
          jlpm run build
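      # JupyterLab is started in the background so the job can move on to installing the
      # browser and waiting for the server to answer on port 8888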
      - name: Launch JupyterLab
        shell: bash
        run: |
          # Keep the server log so it can be printed at the end of the job
          (jlpm start > /tmp/jupyterlab_server_old.log 2>&1) &
        working-directory: galata
      - name: Install browser
        run: |
          # Install only Chromium browser
          jlpm playwright install chromium
          jlpm run build
        working-directory: galata
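      # Poll the /lab endpoint until the server answers, with a 360 s timeout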
      - name: Wait for JupyterLab
        run: npx wait-on http-get://localhost:8888/lab -t 360000
      - name: Execute benchmark tests
        continue-on-error: true
        working-directory: galata
        run: |
          jlpm run test:benchmark -u
      - name: Kill the server
        shell: bash
        run: |
          kill -s SIGKILL $(pgrep jupyter-lab)
      # Second run the benchmark on the new reference
      - name: Checkout latest version
        run: |
          cp galata/lab-benchmark-expected.json /tmp/
          git restore galata/lab-benchmark-expected.json || true # Not versioned any more
          echo Checking out ${NEW_REF_SHA}...
          git checkout ${NEW_REF_SHA}
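      # Rebuild JupyterLab from a clean state so the new reference runs with its own
      # dependencies and build artifacts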
      - name: Install dependencies
        run: |
          # Reset installation
          jlpm run clean:slate
          jlpm run build
      - name: Launch JupyterLab
        working-directory: galata
        run: |
          # Keep the server log so it can be printed at the end of the job
          (jlpm start > /tmp/jupyterlab_server_new.log 2>&1) &
      - name: Install browser
        working-directory: galata
        run: |
          # Install only Chromium browser
          jlpm playwright install chromium
          jlpm run build
      - name: Wait for JupyterLab
        run: npx wait-on http-get://localhost:8888/lab -t 360000
      - name: Execute benchmark tests
        continue-on-error: true
        shell: bash
        working-directory: galata
        run: |
          set -ex
          # Update test screenshots - in case they have not yet been corrected on the challenger branch
          BENCHMARK_NUMBER_SAMPLES=1 PW_VIDEO=1 jlpm run test:benchmark -u
          # Copy reference here otherwise it will use the value from the update screenshots
          # command called just before
          cp /tmp/lab-benchmark-expected.json .
          jlpm run test:benchmark
      - name: Stop JupyterLab
        if: always()
        run: |
          kill -s SIGTERM $(pgrep jupyter-lab)
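      # Build the markdown report: publish the comparison chart through cml, flag any change
      # in test metadata, and expose the result as the job summary and in the uploaded artifacts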
      - name: Generate the report
        env:
          REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          REPORT: ./benchmark-results/lab-benchmark.md
          # PR number used below to save the report target for comment publication
          PULL_REQUEST_ID: ${{ github.event.issue.number }}
        shell: bash
        working-directory: galata
        run: |
          # Publish image to cml.dev
          echo "" >> ${REPORT}
          cml-publish ./benchmark-results/lab-benchmark.svg --md >> ${REPORT}
          echo "" >> ${REPORT}
          # Test if metadata have changed
          export METADATA_DIFF="/tmp/metadata.diff"
          diff -u <(jq --sort-keys .metadata benchmark-results/lab-benchmark.json) <(jq --sort-keys .metadata lab-benchmark-expected.json) > ${METADATA_DIFF} || true
          if [[ -s ${METADATA_DIFF} ]]; then
            echo "<details><summary>:exclamation: Test metadata have changed</summary>" >> ${REPORT}
            echo "" >> ${REPORT}
            echo "\`\`\`diff" >> ${REPORT}
            cat ${METADATA_DIFF} >> ${REPORT}
            echo "\`\`\`" >> ${REPORT}
            echo "" >> ${REPORT}
            echo "</details>" >> ${REPORT}
          fi
          # Set the report as summary
          cat ${REPORT} >> ${GITHUB_STEP_SUMMARY}
          # Copy the reference data to upload it as artifact
          cp lab-benchmark-expected.json ./benchmark-results/
          # Save PR number for comment publication
          echo "${PULL_REQUEST_ID}" > ./benchmark-results/NR
      - name: Upload Galata Test assets
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-assets
          path: |
            galata/benchmark-results
            galata/test-results
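      # Always dump both server logs to help with debugging failed runs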
      - name: Print JupyterLab logs
        if: always()
        run: |
          cat /tmp/jupyterlab_server_old.log
          cat /tmp/jupyterlab_server_new.log
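  # Second job, triggered by the same comment: run the upstream memory-leak tests
  # from jupyterlab/benchmarks against a fresh dev install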
  memory-leak:
    name: Execute memory-leak tests
    if: >
      (
        github.event.issue.author_association == 'OWNER' ||
        github.event.issue.author_association == 'COLLABORATOR' ||
        github.event.issue.author_association == 'MEMBER'
      ) && github.event.issue.pull_request && contains(github.event.comment.body, 'please run benchmarks')
    runs-on: ubuntu-22.04
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1
      - name: Install dependencies
        run: |
          bash ./scripts/ci_install.sh
          # Build dev-mode
          jlpm run build
      - name: Launch JupyterLab
        shell: bash
        run: |
          # Keep the server log so it can be printed at the end of the job
          jlpm start > /tmp/jupyterlab_server.log 2>&1 &
        working-directory: galata
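      # Delegate to the shared memory-leak action from the jupyterlab/benchmarks repository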
      - name: Execute memory leak tests
        id: memory_leaks
        uses: jupyterlab/benchmarks/.github/actions/memory-leak@v1
        with:
          server_url: localhost:8888
          artifacts_name: benchmark-assets
      - name: Kill the server
        if: always()
        shell: bash
        run: |
          kill -s SIGKILL $(pgrep jupyter-lab)
      - name: Print JupyterLab logs
        if: always()
        shell: bash
        run: |
          cat /tmp/jupyterlab_server.log