Commit

Merge pull request containers#189 from containers/model_servers
Refactor model servers
lmilbaum authored Apr 7, 2024
2 parents cefd8c2 + cc9a15f commit dbfa978
Showing 9 changed files with 100 additions and 81 deletions.
102 changes: 31 additions & 71 deletions .github/workflows/model_servers.yaml
@@ -1,31 +1,31 @@
-name: model_servers
+name: Model Servers
 
 on:
   pull_request:
     branches:
       - main
-    paths:
-      - ./model_servers/
-      - .github/workflows/model_servers.yaml
+    # paths:
+    #   - ./model_servers/
+    #   - .github/workflows/model_servers.yaml
   push:
     branches:
       - main
-    paths:
-      - ./model_servers/
-      - .github/workflows/model_servers.yaml
+    # paths:
+    #   - ./model_servers/
+    #   - .github/workflows/model_servers.yaml
 
 env:
   REGISTRY: ghcr.io
-  IMAGE_NAME: ${{ github.repository_owner }}/model_servers
-  # Image Repo Migration variables
-  NEW_REGISTRY: quay.io
-  NEW_REGISTRY_USER: ai-lab+ghrobot
-  NEW_IMAGE_NAME_LLAMA: quay.io/ai-lab/model_servers/llamacpp_python
-  IMAGE_NAME_WHISPER: quay.io/ai-lab/model_servers/whispercpp
-
 
 jobs:
   build-and-push-image:
+    strategy:
+      matrix:
+        include:
+          - image_name: llamacpp_python
+            model: mistral
+          - image_name: whispercpp
+            model: whisper-small
     runs-on: ubuntu-latest
     permissions:
       contents: read
@@ -38,36 +38,42 @@ jobs:
     steps:
       - uses: actions/[email protected]
 
+      - name: Install qemu dependency
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y qemu-user-static
+
       - name: Build Image
         id: build_image
         uses: redhat-actions/[email protected]
         with:
-          image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          image: ${{ env.REGISTRY }}/${{ matrix.image_name }}
           platforms: linux/amd64, linux/arm64
           tags: latest
-          containerfiles: ./model_servers/llamacpp_python/base/Containerfile
-          context: model_servers/llamacpp_python/
+          containerfiles: ./model_servers/${{ matrix.image_name }}/base/Containerfile
+          context: model_servers/${{ matrix.image_name }}/
 
       - name: Download model
-        working-directory: ./model_servers/llamacpp_python/
-        run: make download-model-mistral
+        working-directory: ./model_servers/${{ matrix.image_name }}/
+        run: make ${{ matrix.model }}
 
       - name: Set up Python
         uses: actions/[email protected]
         with:
           python-version: '3.11'
 
       - name: Install python dependencies
-        working-directory: ./model_servers/llamacpp_python/
+        working-directory: ./model_servers/${{ matrix.image_name }}/
         run: make install
 
       - name: Run tests
-        working-directory: ./model_servers/llamacpp_python/
+        working-directory: ./model_servers/${{ matrix.image_name }}/
         run: make test
+        env:
+          IMAGE_NAME: ${{ matrix.image_name }}
 
       - name: Login to container registry
-        if: >
-          (github.event_name == 'push' || github.event_name == 'schedule') &&
-          (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
+        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
         uses: docker/[email protected]
         with:
           registry: ${{ env.REGISTRY }}
@@ -76,55 +82,9 @@
 
       - name: Push image
         id: push_image
-        if: >
-          (github.event_name == 'push' || github.event_name == 'schedule') &&
-          (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
+        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
         uses: redhat-actions/[email protected]
         with:
           image: ${{ steps.build_image.outputs.image }}
           tags: ${{ steps.build_image.outputs.tags }}
           registry: ${{ env.REGISTRY }}
-  build-and-push-image-whispercpp:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: read
-      packages: write
-    services:
-      registry:
-        image: registry:2.8.3
-        ports:
-          - 5000:5000
-    steps:
-      - name: Build Image Whispercpp
-        id: build_image_whisper
-        uses: redhat-actions/[email protected]
-        working-directory: ./model_servers/whispercpp/
-        run: make build
-
-      - name: Download model Whispercpp
-        working-directory: ./model_servers/whispercpp/
-        run: make download-model-whisper-small
-
-      - name: Login to container registry
-        if: >
-          (github.event_name == 'push' || github.event_name == 'schedule') &&
-          (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
-        uses: docker/[email protected]
-        with:
-          registry: ${{ env.NEW_REGISTRY }}
-          username: ${{ env.NEW_REGISTRY_USER }}
-          password: ${{ secrets.AILAB_GHROBOT_TOKEN }} # THIS NEEDS TO BE CREATED
-
-      - name: Push image
-        id: push_image
-        if: >
-          (github.event_name == 'push' || github.event_name == 'schedule') &&
-          (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v'))
-        uses: redhat-actions/[email protected]
-        with:
-          image: ${{ steps.build_image_whisper.outputs.image }}
-          tags: ${{ steps.build_image_whisper.outputs.tags }}
-          registry: ${{ env.NEW_REGISTRY }}
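
For orientation, a rough Python sketch (not part of the commit) of how the matrix entries above resolve into per-job build parameters; REGISTRY mirrors the workflow env, and the printed layout is purely illustrative:

    # How each matrix entry resolves in the refactored workflow.
    matrix = [
        {"image_name": "llamacpp_python", "model": "mistral"},
        {"image_name": "whispercpp", "model": "whisper-small"},
    ]

    REGISTRY = "ghcr.io"

    for entry in matrix:
        image = f"{REGISTRY}/{entry['image_name']}"
        containerfile = f"./model_servers/{entry['image_name']}/base/Containerfile"
        workdir = f"./model_servers/{entry['image_name']}/"
        print(f"build {image} from {containerfile}, then `make {entry['model']}` in {workdir}")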

6 changes: 3 additions & 3 deletions model_servers/llamacpp_python/Makefile
@@ -31,7 +31,7 @@ ifeq ($(OS),Linux)
 endif
 
 .Phony: all
-all: build download-model-mistral run
+all: build mistral run
 
 .PHONY: build
 build:
@@ -54,8 +54,8 @@ download-model-llama:
 	curl -H "Cache-Control: no-cache" -s -S -L -f $(LLAMA_MODEL_URL) -z $(RELATIVE_MODELS_PATH)/$(TINY_LLAMA_MODEL_NAME) -o $(RELATIVE_MODELS_PATH)/$(TINY_LLAMA_MODEL_NAME).tmp && mv -f $(RELATIVE_MODELS_PATH)/$(TINY_LLAMA_MODEL_NAME).tmp $(RELATIVE_MODELS_PATH)/$(TINY_LLAMA_MODEL_NAME) 2>/dev/null || rm -f $(RELATIVE_MODELS_PATH)/$(TINY_LLAMA_MODEL_NAME).tmp $(RELATIVE_MODELS_PATH)/$(TINY_LLAMA_MODEL_NAME)
 
 
-.PHONY: download-model-mistral
-download-model-mistral:
+.PHONY: mistral
+mistral:
 	curl -H "Cache-Control: no-cache" -s -S -L -f $(MISTRAL_MODEL_URL) -z $(RELATIVE_MODELS_PATH)/$(MISTRAL_MODEL_NAME) -o $(RELATIVE_MODELS_PATH)/$(MISTRAL_MODEL_NAME).tmp && mv -f $(RELATIVE_MODELS_PATH)/$(MISTRAL_MODEL_NAME).tmp $(RELATIVE_MODELS_PATH)/$(MISTRAL_MODEL_NAME) 2>/dev/null || rm -f $(RELATIVE_MODELS_PATH)/$(MISTRAL_MODEL_NAME).tmp $(RELATIVE_MODELS_PATH)/$(MISTRAL_MODEL_NAME)
 
 .PHONY: install
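
The curl recipes in both Makefiles share a download-then-rename idiom: fetch into a .tmp file (with -z making the fetch conditional on the local file's timestamp) and only move the result into place on success, so a failed transfer never leaves a truncated model behind. A minimal Python sketch of the same pattern, assuming stdlib urllib suffices (the function name is illustrative):

    import os
    import urllib.request

    def atomic_download(url: str, dest: str) -> None:
        # Fetch into a sibling .tmp file, then rename into place.
        tmp = dest + ".tmp"
        try:
            urllib.request.urlretrieve(url, tmp)
            os.replace(tmp, dest)  # atomic rename, like the Makefile's mv -f
        except Exception:
            # On failure, clean up the partial download (the rm -f branch).
            if os.path.exists(tmp):
                os.remove(tmp)
            raise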
16 changes: 13 additions & 3 deletions model_servers/whispercpp/Makefile
@@ -13,6 +13,7 @@ WHISPER_BASE_MODEL_NAME := ggml-base.en.bin
 WHISPER_BASE_MODEL_URL := https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin
 
 SELECTED_MODEL_NAME := $(or $(SELECTED_MODEL),$(WHISPER_SMALL_MODEL_NAME))
+SELECTED_MODEL_URL := $(or $(SELECTED_MODEL_LINK),$(WHISPER_SMALL_MODEL_URL))
 
 # --- END MODEL OPTIONS ---
 
@@ -25,17 +26,21 @@ ifeq ($(OS),Linux)
 endif
 
 .PHONY: all
-all: build download-model-whisper-small run
+all: build whisper-small run
 
 .PHONY: build
 build:
 	podman build -t $(IMAGE) . -f Containerfile
 
-.PHONY: download-model-whisper-small
-download-model-whisper-small:
+.PHONY: whisper-small
+whisper-small:
 	cd ../../models && \
 	curl -s -S -L -f $(WHISPER_SMALL_MODEL_URL) -z $(WHISPER_SMALL_MODEL_NAME) -o $(WHISPER_SMALL_MODEL_NAME).tmp && mv -f $(WHISPER_SMALL_MODEL_NAME).tmp $(WHISPER_SMALL_MODEL_NAME) 2>/dev/null || rm -f $(WHISPER_SMALL_MODEL_NAME).tmp $(WHISPER_SMALL_MODEL_NAME)
 
+.PHONY: install
+install:
+	pip install -r tests/requirements.txt
+
 .PHONY: download-model-whisper-base
 download-model-whisper-base:
 	cd ../../models && \
@@ -45,3 +50,8 @@ download-model-whisper-base:
 run:
 	cd ../../models && \
 	podman run -d --rm -it -p $(PORT):$(PORT) -v ./$(SELECTED_MODEL_NAME):$(MODELS_PATH)/$(SELECTED_MODEL_NAME):$(BIND_MOUNT_OPTIONS) -e HOST=0.0.0.0 -e MODEL_PATH=$(MODELS_PATH)/$(SELECTED_MODEL_NAME) -e PORT=$(PORT) $(IMAGE)
+
+.PHONY: test
+test:
+	curl -H "Cache-Control: no-cache" -s -S -L -f $(SELECTED_MODEL_URL) -z ./model.gguf -o ./model.gguf.tmp && mv -f ./model.gguf.tmp ./model.gguf 2>/dev/null || rm -f ./model.gguf.tmp ./model.gguf
+	pytest --log-cli-level NOTSET
2 changes: 1 addition & 1 deletion model_servers/whispercpp/base/Containerfile
@@ -19,6 +19,6 @@ COPY --from=builder /app /app
 COPY --from=mwader/static-ffmpeg:6.1.1 /ffmpeg /bin/
 COPY --from=mwader/static-ffmpeg:6.1.1 /ffprobe /bin/
 
-COPY run.sh /app/
+COPY src /app/
 ENV AUDIO_FILE=/app/jfk.wav
 ENTRYPOINT ["sh", "run.sh"]
Empty file.
30 changes: 30 additions & 0 deletions model_servers/whispercpp/tests/conftest.py
@@ -0,0 +1,30 @@
+import pytest_container
+import os
+
+MS = pytest_container.Container(
+    url=f"containers-storage:{os.environ['REGISTRY']}/{os.environ['IMAGE_NAME']}",
+    volume_mounts=[
+        pytest_container.container.BindMount(
+            container_path="/locallm/models/model.gguf",
+            host_path=f"./model.gguf",
+            flags=["ro"]
+        )
+    ],
+    extra_environment_variables={
+        "MODEL_PATH": "/locallm/models/model.gguf",
+        "HOST": "0.0.0.0",
+        "PORT": "8001"
+    },
+    forwarded_ports=[
+        pytest_container.PortForwarding(
+            container_port=8001,
+            host_port=8001
+        )
+    ],
+)
+
+def pytest_generate_tests(metafunc):
+    pytest_container.auto_container_parametrize(metafunc)
+
+def pytest_addoption(parser):
+    pytest_container.add_logging_level_options(parser)
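
As a rough mental model, the Container() definition above corresponds to a podman invocation along these lines; pytest-container assembles its own command, so the exact flags are an assumption:

    import os

    # Hypothetical equivalent of the MS definition; not what pytest-container literally runs.
    image = f"{os.environ['REGISTRY']}/{os.environ['IMAGE_NAME']}"
    cmd = [
        "podman", "run", "--rm",
        "-v", "./model.gguf:/locallm/models/model.gguf:ro",  # BindMount
        "-e", "MODEL_PATH=/locallm/models/model.gguf",       # extra_environment_variables
        "-e", "HOST=0.0.0.0",
        "-e", "PORT=8001",
        "-p", "8001:8001",                                   # PortForwarding
        image,
    ]
    print(" ".join(cmd))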
8 changes: 8 additions & 0 deletions model_servers/whispercpp/tests/requirements.txt
@@ -0,0 +1,8 @@
+pip==24.0
+pytest-container==0.4.0
+pytest-selenium==4.1.0
+pytest-testinfra==10.1.0
+pytest==8.1.1
+requests==2.31.0
+selenium==4.19.0
+tenacity==8.2.3
13 changes: 13 additions & 0 deletions model_servers/whispercpp/tests/test_alive.py
@@ -0,0 +1,13 @@
+import pytest_container
+from .conftest import MS
+import tenacity
+
+CONTAINER_IMAGES = [MS]
+
+
+def test_etc_os_release_present(auto_container: pytest_container.container.ContainerData):
+    assert auto_container.connection.file("/etc/os-release").exists
+
+@tenacity.retry(stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_exponential())
+def test_alive(auto_container: pytest_container.container.ContainerData, host):
+    host.run_expect([0],f"curl http://localhost:{auto_container.forwarded_ports[0].host_port}",).stdout.strip()
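
The test_alive probe relies on tenacity to poll until the server inside the container answers. A self-contained sketch of that retry behavior, with the flaky server simulated by a counter:

    import itertools
    import tenacity

    calls = itertools.count(1)

    @tenacity.retry(stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_exponential())
    def probe() -> str:
        # Fails on the first two attempts; tenacity sleeps between tries with
        # exponentially growing waits and gives up after five attempts.
        if next(calls) < 3:
            raise RuntimeError("server not up yet")
        return "alive"

    print(probe())  # retries twice, then prints "alive"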
model_servers/llamacpp_python/tests/conftest.py
@@ -1,11 +1,9 @@
 import pytest_container
 import os
-import pytest
-from selenium import webdriver
 
 
 MS = pytest_container.Container(
-    url=f"containers-storage:{os.environ['REGISTRY']}/model_servers",
+    url=f"containers-storage:{os.environ['REGISTRY']}/llamacpp_python",
     volume_mounts=[
         pytest_container.container.BindMount(
             container_path="/locallm/models",
