Add harness nightly #101
name: LLM Harness Evaluation
# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-nightly-test-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true
# Controls when the action will run.
on:
  schedule:
    - cron: "00 13 * * 5" # GMT time, 13:00 GMT == 21:00 China
  pull_request:
    branches: [main]
    paths:
      - ".github/workflows/llm-harness-evaluation.yml"
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
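  # Build the native llm binaries first; the evaluation job downloads them below.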
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml
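  # Run EleutherAI's lm-evaluation-harness against bigdl-llm for every
  # model/precision/task/device combination in the matrix below.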
  llm-harness-evaluation:
    timeout-minutes: 1000
    needs: llm-cpp-build
    strategy:
      fail-fast: false
      matrix:
        # include:
        #   python-version: "3.9"
        #   model_name: "stablelm-3b-4e1t"
        #   task: "arc"
        #   precision: "sym_int4"
        python-version: ["3.9"]
        model_name: [stablelm-3b-4e1t]
        task: [truthfulqa]
        precision: [mixed_fp4]
        device: [xpu]
    runs-on: [self-hosted, llm, temp-arc01]
    env:
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
      ORIGIN_DIR: /mnt/disk1/models
      HARNESS_HF_HOME: /mnt/disk1/harness_home
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade setuptools==58.0.4
          python -m pip install --upgrade wheel
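      # Fetch the binaries produced by the llm-cpp-build job above.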
      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary
      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env
        with:
          extra-dependency: "xpu"
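      # Pin the harness to a fixed commit so nightly results stay comparable across runs.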
      - name: Install harness
        working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/harness/
        shell: bash
        run: |
          git clone https://github.com/EleutherAI/lm-evaluation-harness.git
          cd lm-evaluation-harness
          git checkout e81d3cc
          pip install -e .
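      # Models and datasets are mirrored on the runner's local disk; download
      # from the internal FTP server only when they are not cached yet.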
      - name: Download models and datasets
        shell: bash
        run: |
          echo "MODEL_PATH=${ORIGIN_DIR}/${{ matrix.model_name }}/" >> "$GITHUB_ENV"
          MODEL_PATH=${ORIGIN_DIR}/${{ matrix.model_name }}/
          if [ ! -d "$HARNESS_HF_HOME" ]; then
            mkdir -p "$HARNESS_HF_HOME"
            wget -r -nH -l inf --no-verbose --cut-dirs=2 ${LLM_FTP_URL}/llm/LeaderBoard_Datasets/ -P "$HARNESS_HF_HOME"/
          fi
          if [ ! -d "$MODEL_PATH" ]; then
            wget -r -nH --no-verbose --cut-dirs=1 ${LLM_FTP_URL}/llm/${{ matrix.model_name }} -P "${ORIGIN_DIR}"
          fi
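      # transformers is pinned; 4.34.0 is presumably the release these models
      # and golden numbers were validated against.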
      - name: Upgrade packages
        shell: bash
        run: |
          pip install --upgrade transformers==4.34.0
      - name: Run harness
        shell: bash
        working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/harness
        env:
          USE_XETLA: OFF
          # SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS: 1
        run: |
          export HF_HOME=${HARNESS_HF_HOME}
          export HF_DATASETS=$HARNESS_HF_HOME/datasets
          export HF_DATASETS_CACHE=$HARNESS_HF_HOME/datasets
          source /opt/intel/oneapi/setvars.sh
          python run_llb.py --model bigdl-llm --pretrained ${MODEL_PATH} --precision ${{ matrix.precision }} --device ${{ matrix.device }} --tasks ${{ matrix.task }} --batch_size 1 --no_cache --output_path results
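      # Fail the job if the measured accuracy regresses from the golden numbers.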
      - name: Compare with golden accuracy
        shell: bash
        working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/harness
        run: |
          # Write the golden-results file from scratch ('>' truncates any stale
          # copy) and keep a single entry per model so no JSON keys are duplicated.
          echo '{' > golden_results.json
          echo '"stablelm-3b-4e1t": {"xpu": {"mixed_fp4": {"truthfulqa_mc": {"mc1": 0.4,"mc1_stderr": 0.16329931618554522,"mc2": 0.46830478972624556,"mc2_stderr": 0.119}, "arc_challenge": {"acc": 0.5, "acc_stderr": 0.5, "acc_norm": 0.5, "acc_norm_stderr": 0.5}}, "fp8": {"truthfulqa_mc": {"mc1": 0.4,"mc1_stderr": 0.16329931618554522,"mc2": 0.46830478972624556,"mc2_stderr": 0.119}, "arc_challenge": {"acc": 0.5, "acc_stderr": 0.5, "acc_norm": 0.5, "acc_norm_stderr": 0.5}}}},' >> golden_results.json
          echo '"Mistral-7B-v0.1": {"xpu": {"mixed_fp4": {"truthfulqa_mc": {"mc1": 0.27539779681762544,"mc1_stderr": 0.01563813566777552,"mc2": 0.41062244273774384,"mc2_stderr": 0.014067078150027909}, "arc_challenge": {"acc": 0.5674061433447098,"acc_stderr": 0.014478005694182528,"acc_norm": 0.5989761092150171,"acc_norm_stderr": 0.014322255790719867}}, "fp8": {"truthfulqa_mc": {"mc1": 0.2778457772337821,"mc1_stderr": 0.015680929364024643,"mc2": 0.4212635093545362,"mc2_stderr": 0.01414660694632397}, "arc_challenge": {"acc": 0.5639931740614335,"acc_stderr": 0.014491225699230916, "acc_norm": 0.5998293515358362,"acc_norm_stderr": 0.014317197787809174}}}}' >> golden_results.json
          echo '}' >> golden_results.json
          python accuracy_regression.py results/${{ matrix.model_name }}/${{ matrix.device }}/${{ matrix.precision }}/${{ matrix.task }}/result.json golden_results.json