
[CI] ggml: fix the execution error and enable macos runners #193

Workflow file for this run

name: ggml llama2 examples
on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:
    inputs:
      logLevel:
        description: 'Log level'
        required: true
        default: 'info'
  push:
    branches: [ '*' ]
    paths:
      - ".github/workflows/llama.yml"
      - "wasmedge-ggml-llama-interactive/**"
  pull_request:
    branches: [ '*' ]
    paths:
      - ".github/workflows/llama.yml"
      - "wasmedge-ggml-llama-interactive/**"
jobs:
  ubuntu:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v4
      - name: Install apt-get packages
        run: |
          # Reset grub-efi's debconf selections so the upgrade doesn't block
          # on an interactive grub-pc prompt on the hosted runner
          echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc
          sudo ACCEPT_EULA=Y apt-get update
          sudo ACCEPT_EULA=Y apt-get upgrade
          sudo apt-get install wget git curl software-properties-common build-essential libopenblas-dev
      - name: Install Rust target for wasm
        run: |
          rustup target add wasm32-wasi
      - name: Install WasmEdge + WASI-NN + GGML
        run: |
          VERSION=0.13.5
          curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh | sudo bash -s -- -v $VERSION --plugins wasi_nn-ggml -p /usr/local
      - name: Example
        run: |
          cd wasmedge-ggml-llama-interactive
          curl -LO https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf
          cargo build --target wasm32-wasi --release
          wasmedge --dir .:. \
            --nn-preload default:GGML:AUTO:tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf \
            target/wasm32-wasi/release/wasmedge-ggml-llama-interactive.wasm \
            default \
            '### System:\nYou are an AI assistant\n\n### User:\nWhere is the capital of Japan?\n\n### Response:\n'
  macos:
    runs-on: macos-14
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Install Rust target for wasm
        run: |
          rustup target add wasm32-wasi
      - name: Install WasmEdge + WASI-NN + GGML
        run: |
          VERSION=0.13.5
          curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh | sudo bash -s -- -v $VERSION --plugins wasi_nn-ggml -p /usr/local
      - name: Example
        run: |
          cd wasmedge-ggml-llama-interactive
          curl -LO https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
          # curl -LO https://huggingface.co/juanjgit/orca_mini_3B-GGUF/resolve/main/orca-mini-3b.q4_0.gguf
          cargo build --target wasm32-wasi --release
          wasmedge --dir .:. \
            --nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
            target/wasm32-wasi/release/wasmedge-ggml-llama-interactive.wasm \
            default \
            '### System:\nYou are an AI assistant\n\n### User:\nWhere is the capital of Japan?\n\n### Response:\n'
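
For local debugging, the failing "Example" step can be reproduced outside CI with the same commands; a minimal shell sketch, assuming WasmEdge 0.13.5 with the wasi_nn-ggml plugin has already been installed under /usr/local as in the workflow above:

# Sketch: reproduce the Ubuntu "Example" step locally.
# Assumes Rust, the wasm32-wasi target, and WasmEdge (with the
# wasi_nn-ggml plugin) are installed as in the steps above.
cd wasmedge-ggml-llama-interactive
curl -LO https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf
cargo build --target wasm32-wasi --release
wasmedge --dir .:. \
  --nn-preload default:GGML:AUTO:tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf \
  target/wasm32-wasi/release/wasmedge-ggml-llama-interactive.wasm \
  default \
  '### System:\nYou are an AI assistant\n\n### User:\nWhere is the capital of Japan?\n\n### Response:\n'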