
ExternalObjective function to wrap external codes #9721

Workflow file for this run

name: Benchmarks
on:
  pull_request_target:
    branches:
      - master
    types: [opened, synchronize]
  workflow_dispatch:
    inputs:
      debug_enabled:
        type: boolean
        description: '(https://github.com/marketplace/actions/debugging-with-tmate)'
        required: false
        default: false
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
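# Run the small CPU benchmark suite on both the PR head and current master,
# then post the comparison as a comment on the PR.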
jobs:
  benchmark:
    runs-on: ubuntu-latest
    env:
      GH_TOKEN: ${{ github.token }}
    strategy:
      matrix:
        python-version: ['3.9']
        group: [1, 2]
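    # The two matrix groups each run half of the benchmark tests,
    # split with pytest-split's --splits/--group options used below.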
    steps:
      # Enable tmate debugging of manually-triggered workflows if the input option was provided
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
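      # Check out the PR head explicitly; pull_request_target runs check out the base branch by default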
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
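      # Skip the expensive benchmark steps unless files that can affect the benchmarks have changed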
      - name: Filter changes
        id: changes
        uses: dorny/paths-filter@v3
        with:
          filters: |
            has_changes:
              - 'desc/**'
              - 'tests/benchmarks/**'
              - 'requirements.txt'
              - 'devtools/dev-requirements.txt'
              - 'setup.cfg'
              - '.github/workflows/benchmark.yml'
      - name: Check for relevant changes
        id: check_changes
        run: echo "has_changes=${{ steps.changes.outputs.has_changes }}" >> $GITHUB_ENV
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
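      # Record the exact interpreter version; it names the venv directory and appears in the cache key below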
      - name: Check full Python version
        run: |
          python --version
          python_version=$(python --version 2>&1 | cut -d' ' -f2)
          echo "Python version: $python_version"
          echo "version=$python_version" >> $GITHUB_ENV
      - name: Restore Python environment cache
        if: env.has_changes == 'true'
        id: restore-env
        uses: actions/cache/restore@v4
        with:
          path: .venv-${{ env.version }}
          key: ${{ runner.os }}-venv-${{ env.version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }}
      - name: Set up virtual environment if not restored from cache
        if: steps.restore-env.outputs.cache-hit != 'true' && env.has_changes == 'true'
        run: |
          gh cache list
          python -m venv .venv-${{ env.version }}
          source .venv-${{ env.version }}/bin/activate
          python -m pip install --upgrade pip
          pip install -r devtools/dev-requirements.txt
          pip install matplotlib==3.9.2
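      # Run this group's share of the benchmarks on the PR head; results are saved as 'Latest_Commit'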
      - name: Benchmark with pytest-benchmark (PR)
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ env.version }}/bin/activate
          pwd
          lscpu
          pip list
          cd tests/benchmarks
          python -m pytest benchmark_cpu_small.py -vv \
            --benchmark-save='Latest_Commit' \
            --durations=0 \
            --benchmark-save-data \
            --splits 2 \
            --group ${{ matrix.group }} \
            --splitting-algorithm least_duration
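      # Switch the working tree to master; clean: false preserves untracked files
      # such as the virtual environment and the saved benchmark results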
      - name: Checkout current master
        if: env.has_changes == 'true'
        uses: actions/checkout@v4
        with:
          ref: master
          clean: false
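      # Restore tests/benchmarks from the PR head so both runs use identical benchmark definitions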
      - name: Checkout benchmarks from PR head
        if: env.has_changes == 'true'
        run: git checkout ${{ github.event.pull_request.head.sha }} -- tests/benchmarks
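      # Repeat the same benchmark split against master; results are saved as 'master'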
      - name: Benchmark with pytest-benchmark (MASTER)
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ env.version }}/bin/activate
          pwd
          lscpu
          pip list
          cd tests/benchmarks
          python -m pytest benchmark_cpu_small.py -vv \
            --benchmark-save='master' \
            --durations=0 \
            --benchmark-save-data \
            --splits 2 \
            --group ${{ matrix.group }} \
            --splitting-algorithm least_duration
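      # pytest-benchmark writes results under .benchmarks/; copy the two most recent
      # result files (the PR and master runs) into compare_results/ for the comparison script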
      - name: Put benchmark results in same folder
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ env.version }}/bin/activate
          pwd
          cd tests/benchmarks
          find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 1 > temp1
          find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 2 | head -n 1 > temp2
          t1=$(cat temp1)
          t2=$(cat temp2)
          mkdir compare_results
          cp $t1 compare_results
          cp $t2 compare_results
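      # Fetch benchmark artifacts already uploaded in this run (e.g. by the other matrix group), if any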
      - name: Download artifact
        if: always() && env.has_changes == 'true'
        uses: actions/download-artifact@v4
        with:
          pattern: benchmark_artifact_*
          path: tests/benchmarks
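      # Compare the PR results against master; the summary is written to commit_msg.txt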
      - name: Compare latest commit results to the master branch results
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ env.version }}/bin/activate
          cd tests/benchmarks
          pwd
          python compare_bench_results.py
          cat commit_msg.txt
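      # Post the comparison as a PR comment; the comment-tag lets later runs update the same comment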
      - name: Comment PR with the results
        if: env.has_changes == 'true'
        uses: thollander/actions-comment-pull-request@v3
        env:
          github-token: ${{ secrets.GITHUB_TOKEN }}
        with:
          file-path: tests/benchmarks/commit_msg.txt
          comment-tag: benchmark
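      # Upload this group's raw benchmark data so it is preserved even if earlier steps fail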
      - name: Upload benchmark data
        if: always() && env.has_changes == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: benchmark_artifact_${{ matrix.group }}
          path: tests/benchmarks/.benchmarks
          include-hidden-files: true