benchmark.yml
name: Benchmarks
on:
pull_request:
branches:
- master
paths-ignore:
- 'docs/**'
- 'devtools/**'
workflow_dispatch:
inputs:
debug_enabled:
type: boolean
        description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
required: false
default: false
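# Cancel any in-progress run for the same PR (or ref) when a new one starts,
# so only the most recent push gets benchmarked.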
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
benchmark:
runs-on: ubuntu-latest
steps:
# Enable tmate debugging of manually-triggered workflows if the input option was provided
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3
if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: 3.9
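      # The job only restores the Python environment from cache; the cached
      # .venv-3.9 is assumed to be built and saved by a separate workflow,
      # hence the hard failure below when no matching cache entry is found.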
- name: Restore Python environment cache
id: restore-env
uses: actions/cache/restore@v4
with:
path: .venv-3.9
key: ${{ runner.os }}-venv-3.9-${{ hashFiles('devtools/dev-requirements.txt') }}
restore-keys: |
${{ runner.os }}-venv-3.9-
- name: Check if virtual environment exists
if: steps.restore-env.outputs.cache-hit != 'true'
run: |
echo "Error: Virtual environment .venv-3.9 not found!"
exit 1
      - name: Activate virtual environment
        run: |
          # Activation only lasts for this step's shell, so also expose the
          # venv to later steps via GITHUB_ENV and GITHUB_PATH.
          source .venv-3.9/bin/activate
          echo "VIRTUAL_ENV=$VIRTUAL_ENV" >> "$GITHUB_ENV"
          echo "$VIRTUAL_ENV/bin" >> "$GITHUB_PATH"
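      # First run: benchmark the PR head checked out above. Results are saved
      # under tests/benchmarks/.benchmarks as 'Latest_Commit'; lscpu records
      # the runner's CPU details for the log.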
      - name: Benchmark with pytest-benchmark (PR branch)
run: |
pwd
lscpu
cd tests/benchmarks
python -m pytest benchmark_cpu_small.py -vv \
--benchmark-save='Latest_Commit' \
--durations=0 \
--benchmark-save-data
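      # Second run: switch the working tree to current master, but keep the
      # PR's tests/benchmarks directory so both runs execute identical
      # benchmark definitions and only the library code differs.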
- name: Checkout current master
uses: actions/checkout@v4
with:
ref: master
clean: false
- name: Checkout benchmarks from PR head
run: git checkout ${{ github.event.pull_request.head.sha }} -- tests/benchmarks
      - name: Benchmark with pytest-benchmark (master branch)
run: |
pwd
lscpu
cd tests/benchmarks
python -m pytest benchmark_cpu_small.py -vv \
--benchmark-save='master' \
--durations=0 \
--benchmark-save-data
      - name: Put benchmark results in the same folder
        run: |
          pwd
          cd tests/benchmarks
          # Newest result file is the 'master' run, second newest is 'Latest_Commit'.
          find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 1 > temp1
          find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 2 | head -n 1 > temp2
          t1=$(cat temp1)
          t2=$(cat temp2)
          mkdir compare_results
          cp "$t1" compare_results
          cp "$t2" compare_results
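      # compare_bench_results.py is assumed to read the two runs gathered in
      # compare_results/ and to write a markdown summary to commit_msg.txt,
      # which is then posted to the PR in the next step.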
- name: Compare latest commit results to the master branch results
run: |
pwd
cd tests/benchmarks
python compare_bench_results.py
cat commit_msg.txt
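      # comment_tag makes the action update a single pinned benchmark comment
      # on the PR instead of posting a new comment on every run.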
      - name: Comment PR with the results
uses: thollander/actions-comment-pull-request@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
filePath: tests/benchmarks/commit_msg.txt
comment_tag: benchmark
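      # Upload the raw .benchmarks data even if earlier steps failed
      # (if: always()), so results can be inspected offline.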
- name: Upload benchmark data
if: always()
uses: actions/upload-artifact@v4
with:
name: benchmark_artifact
path: tests/benchmarks/.benchmarks