[Docs] ggml: add README for the embedding example with CI job (#114) #319

Workflow file for this run

name: ggml llama2 examples
on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:
    inputs:
      logLevel:
        description: 'Log level'
        required: true
        default: 'info'
  push:
    branches: [ '*' ]
    paths:
      - ".github/workflows/llama.yml"
      - "wasmedge-ggml/**"
  pull_request:
    branches: [ '*' ]
    paths:
      - ".github/workflows/llama.yml"
      - "wasmedge-ggml/**"
jobs:
  build:
    strategy:
      matrix:
        runner: [ubuntu-20.04, macos-13, macos-m1]
        job:
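          # The matrix fans out over the runners above and the example jobs below.
          # Each model job follows the same pattern: download a GGUF model, build
          # the example crate for wasm32-wasi, and run it with wasmedge.
          # --nn-preload registers the model as alias:backend:device:path, and the
          # first positional argument given to the .wasm selects a preloaded model
          # by that alias. NGL is set in a later step and feeds n_gpu_layers, the
          # number of layers the GGML backend offloads to the GPU.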
          - name: "Tiny Llama"
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llama
              curl -LO https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v0.3-GGUF/resolve/main/tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:tinyllama-1.1b-chat-v0.3.Q5_K_M.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-llama.wasm \
                default \
                $'<|im_start|>system\nYou are an AI assistant<|im_end|>\n<|im_start|>user\nWhere is the capital of Japan?<|im_end|>\n<|im_start|>assistant'
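          # Gemma uses its own <start_of_turn>/<end_of_turn> chat template.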
          - name: Gemma 2B
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/gemma
              curl -LO https://huggingface.co/second-state/Gemma-2b-it-GGUF/resolve/main/gemma-2b-it-Q5_K_M.gguf
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:gemma-2b-it-Q5_K_M.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-gemma.wasm \
                default \
                '<start_of_turn>user Where is the capital of Japan? <end_of_turn><start_of_turn>model'
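          # The llava examples are multimodal: mmproj points at the image
          # projector model and image at the picture the prompt asks about.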
          - name: Llava v1.5 7B
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llava
              curl -LO https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q5_k.gguf
              curl -LO https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
              curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env mmproj=mmproj-model-f16.gguf \
                --env image=monalisa.jpg \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:ggml-model-q5_k.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-llava.wasm \
                default \
                $'You are a helpful, respectful and honest assistant. Always answer as short as possible, while being safe.\nUSER:<image>\nDo you know who drew this painting?\nASSISTANT:'
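          # Same llava example binary as v1.5; ctx_size enlarges the context
          # window, since llava-1.6 encodes an image into more tokens than v1.5.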
          - name: Llava v1.6 7B
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llava
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/vicuna-7b-q5_k.gguf
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf
              curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env mmproj=mmproj-vicuna7b-f16.gguf \
                --env image=monalisa.jpg \
                --env ctx_size=4096 \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:vicuna-7b-q5_k.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-llava.wasm \
                default \
                $'You are a helpful, respectful and honest assistant. Always answer as short as possible, while being safe.\nUSER:<image>\nDo you know who drew this painting?\nASSISTANT:'
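          # Reuses the llama example binary with Llama-2's [INST]/<<SYS>> prompt format.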
          - name: Llama2 7B
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/llama
              curl -LO https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --nn-preload default:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-llama.wasm \
                default \
                $'[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you do not know the answer to a question, please do not share false information.\n<</SYS>>\nWhat is the capital of Japan?[/INST]'
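          # n_predict caps generation at 100 tokens to keep the completion run short.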
          - name: StarCoder 2 7B
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/basic
              curl -LO https://huggingface.co/second-state/StarCoder2-7B-GGUF/resolve/main/starcoder2-7b-Q5_K_M.gguf
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --env n_predict=100 \
                --nn-preload default:GGML:AUTO:starcoder2-7b-Q5_K_M.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-basic.wasm \
                default \
                'def print_hello_world():'
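          # Preloads two models under different aliases ("llama2" and "llava")
          # so a single wasm module can query both in one run.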
          - name: Multiple Models Example
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/multimodel
              curl -LO https://huggingface.co/second-state/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_M.gguf
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/vicuna-7b-q5_k.gguf
              curl -LO https://huggingface.co/cmp-nct/llava-1.6-gguf/resolve/main/mmproj-vicuna7b-f16.gguf
              curl -LO https://llava-vl.github.io/static/images/monalisa.jpg
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --env n_gpu_layers="$NGL" \
                --env image=monalisa.jpg \
                --env mmproj=mmproj-vicuna7b-f16.gguf \
                --nn-preload llama2:GGML:AUTO:llama-2-7b-chat.Q5_K_M.gguf \
                --nn-preload llava:GGML:AUTO:vicuna-7b-q5_k.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-multimodel.wasm \
                'describe this picture please'
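          # The embedding example returns an embedding vector for the input
          # text rather than generated text.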
          - name: Embedding Example
            run: |
              test -f ~/.wasmedge/env && source ~/.wasmedge/env
              cd wasmedge-ggml/embedding
              curl -LO https://huggingface.co/second-state/All-MiniLM-L6-v2-Embedding-GGUF/resolve/main/all-MiniLM-L6-v2-ggml-model-f16.gguf
              cargo build --target wasm32-wasi --release
              time wasmedge --dir .:. \
                --nn-preload default:GGML:AUTO:all-MiniLM-L6-v2-ggml-model-f16.gguf \
                target/wasm32-wasi/release/wasmedge-ggml-llama-embedding.wasm \
                default \
                'hello world'
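          # The remaining streaming/interactive examples are compile-only checks.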
          - name: Build llama-stream
            run: |
              cd wasmedge-ggml/llama-stream
              cargo build --target wasm32-wasi --release
          - name: Build chatml
            run: |
              cd wasmedge-ggml/chatml
              cargo build --target wasm32-wasi --release
          - name: Build llava-base64-stream
            run: |
              cd wasmedge-ggml/llava-base64-stream
              cargo build --target wasm32-wasi --release
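        # The include entry gives the macos-m1 runner ngl=100 so the GGML
        # backend offloads layers to the GPU (Metal); other runners default
        # to 0 via the "Set environment variable" step below.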
        include:
          - runner: macos-m1
            ngl: 100
    name: ${{ matrix.runner }} - ${{ matrix.job.name }}
    runs-on: ${{ matrix.runner }}
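    # Shared setup: check out the repo, install Rust and the wasm32-wasi
    # target, install WasmEdge with the wasi_nn-ggml plugin, set NGL,
    # then run the selected matrix job.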
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Install Rust target for wasm
        run: |
          rustup target add wasm32-wasi
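      # The install script pins WasmEdge 0.13.5 with the WASI-NN GGML plugin and
      # writes ~/.wasmedge/env, which each job sources to put wasmedge on PATH.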
      - name: Install WasmEdge + WASI-NN + GGML
        run: |
          VERSION=0.13.5
          curl -sSf https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh | bash -s -- -v $VERSION --plugins wasi_nn-ggml
      - name: Set environment variable
        run: echo "NGL=${{ matrix.ngl || 0 }}" >> $GITHUB_ENV
      - name: ${{ matrix.job.name }}
        run: ${{ matrix.job.run }}