# .github/workflows/benchmark.yml
name: Benchmarks
on:
  pull_request:
    branches:
      - master
  workflow_dispatch:
    inputs:
      debug_enabled:
        type: boolean
        description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
        required: false
        default: false
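# Cancel any in-progress run of this workflow for the same pull request or branch when a new run starts.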
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
jobs:
  benchmark:
    runs-on: ubuntu-latest
    env:
      GH_TOKEN: ${{ github.token }}
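    # The benchmark suite is split into two groups (the --splits/--group pytest options below) that run as parallel matrix jobs.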
    strategy:
      matrix:
        python-version: ['3.9']
        group: [1, 2]
    steps:
      # Enable tmate debugging of manually-triggered workflows if the input option was provided
      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
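      # Check out the pull request's head commit so the first benchmark run measures the proposed changes.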
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
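      # Detect whether any files that can affect benchmark results changed; later steps are skipped otherwise.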
      - name: Filter changes
        id: changes
        uses: dorny/paths-filter@v3
        with:
          filters: |
            has_changes:
              - 'desc/**'
              - 'tests/benchmarks/**'
              - 'requirements.txt'
              - 'devtools/dev-requirements.txt'
              - 'setup.cfg'
              - '.github/workflows/benchmark.yml'
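      # Export the filter result as an environment variable so the following steps can gate on env.has_changes.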
      - name: Check for relevant changes
        id: check_changes
        run: echo "has_changes=${{ steps.changes.outputs.has_changes }}" >> $GITHUB_ENV
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
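      # Restore a cached virtual environment keyed on the requirements files; it is rebuilt below on a cache miss.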
      - name: Restore Python environment cache
        if: env.has_changes == 'true'
        id: restore-env
        uses: actions/cache/restore@v4
        with:
          path: .venv-${{ matrix.python-version }}
          key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }}
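      # 'gh cache list' (authenticated via GH_TOKEN above) prints the existing caches, which helps when diagnosing cache misses.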
      - name: Set up virtual environment if not restored from cache
        if: steps.restore-env.outputs.cache-hit != 'true' && env.has_changes == 'true'
        run: |
          gh cache list
          python -m venv .venv-${{ matrix.python-version }}
          source .venv-${{ matrix.python-version }}/bin/activate
          python -m pip install --upgrade pip
          pip install -r devtools/dev-requirements.txt
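      # Run this group's share of the small CPU benchmarks on the PR head and save the results as 'Latest_Commit'.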
      - name: Benchmark with pytest-benchmark (PR)
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ matrix.python-version }}/bin/activate
          pwd
          lscpu
          cd tests/benchmarks
          python -m pytest benchmark_cpu_small.py -vv \
            --benchmark-save='Latest_Commit' \
            --durations=0 \
            --benchmark-save-data \
            --splits 2 \
            --group ${{ matrix.group }} \
            --splitting-algorithm least_duration
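      # Switch the source tree to master but keep the PR's benchmark scripts, so both runs execute identical benchmark code.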
      - name: Checkout current master
        if: env.has_changes == 'true'
        uses: actions/checkout@v4
        with:
          ref: master
          clean: false
      - name: Checkout benchmarks from PR head
        if: env.has_changes == 'true'
        run: git checkout ${{ github.event.pull_request.head.sha }} -- tests/benchmarks
      - name: Benchmark with pytest-benchmark (MASTER)
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ matrix.python-version }}/bin/activate
          pwd
          lscpu
          cd tests/benchmarks
          python -m pytest benchmark_cpu_small.py -vv \
            --benchmark-save='master' \
            --durations=0 \
            --benchmark-save-data \
            --splits 2 \
            --group ${{ matrix.group }} \
            --splitting-algorithm least_duration
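      # Copy the two most recent result files under .benchmarks (the master and PR runs above) into a single folder for comparison.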
      - name: Put benchmark results in same folder
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ matrix.python-version }}/bin/activate
          pwd
          cd tests/benchmarks
          find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 1 > temp1
          find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 2 | head -n 1 > temp2
          t1=$(cat temp1)
          t2=$(cat temp2)
          mkdir compare_results
          cp $t1 compare_results
          cp $t2 compare_results
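      # Download any benchmark artifacts already uploaded in this run (e.g. by the other matrix group) into tests/benchmarks.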
      - name: Download artifact
        if: always() && env.has_changes == 'true'
        uses: actions/download-artifact@v4
        with:
          pattern: benchmark_artifact_*
          path: tests/benchmarks
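      # Compare the PR results against master and write the summary that will be posted on the pull request.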
      - name: Compare latest commit results to the master branch results
        if: env.has_changes == 'true'
        run: |
          source .venv-${{ matrix.python-version }}/bin/activate
          cd tests/benchmarks
          pwd
          python compare_bench_results.py
          cat commit_msg.txt
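      # Post the comparison summary as a PR comment; comment_tag lets the same comment be updated on later runs.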
      - name: Comment PR with the results
        if: env.has_changes == 'true'
        uses: thollander/actions-comment-pull-request@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          filePath: tests/benchmarks/commit_msg.txt
          comment_tag: benchmark
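      # Upload this group's raw benchmark data; runs even if earlier steps failed, as long as relevant files changed.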
      - name: Upload benchmark data
        if: always() && env.has_changes == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: benchmark_artifact_${{ matrix.group }}
          path: tests/benchmarks/.benchmarks
          include-hidden-files: true