
Commit f31feca

s/Containerfile/Makefile/ typo fix
Signed-off-by: Liora Milbaum <[email protected]>
lmilbaum committed Mar 28, 2024
1 parent a50c9db commit f31feca
Showing 5 changed files with 14 additions and 16 deletions.
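The commit title uses sed's `s/old/new/` substitution notation: occurrences of `make -f Containerfile …` in the docs should have read `make -f Makefile …`, since `-f` names a makefile rather than a container build file. A sketch of how such a repo-wide rename might be applied (hypothetical one-liner assuming GNU sed and `git grep`; not necessarily how this commit was produced):

```bash
# Rewrite "make -f Containerfile" to "make -f Makefile" in Markdown docs.
git grep -l 'make -f Containerfile' -- '*.md' \
  | xargs sed -i 's/make -f Containerfile/make -f Makefile/g'
```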
2 changes: 1 addition & 1 deletion .github/workflows/build-images.yaml
@@ -96,7 +96,7 @@ jobs:
 image: ${{ env.MODEL_SERVICE_IMAGE }}
 tags: latest ${{ github.sha }}
 platforms: linux/amd64, linux/arm64
-context: model_servers/llamacpp_python/base
+context: model_servers/llamacpp_python
 containerfiles: ./model_servers/llamacpp_python/base/Containerfile

 - name: Push model-service image
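In the image-build step, `context:` is the directory handed to the build, while `containerfiles:` keeps pointing at `base/Containerfile`; widening the context presumably lets `COPY src .` in the Containerfile resolve against `model_servers/llamacpp_python/`. A rough local equivalent of the changed step (a sketch; the `playground` tag is taken from the README, not the CI image name):

```bash
# The Containerfile stays under base/, but the build context is the parent directory.
podman build \
  -f ./model_servers/llamacpp_python/base/Containerfile \
  -t playground \
  model_servers/llamacpp_python
```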
4 changes: 2 additions & 2 deletions (second changed workflow file; filename not shown)
@@ -1,4 +1,4 @@
-name: playground
+name: llamacpp_python

 on:
 pull_request:
@@ -39,7 +39,7 @@ jobs:
 image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
 tags: latest
 containerfiles: ./model_servers/llamacpp_python/base/Containerfile
-context: model_servers/llamacpp_python/base
+context: model_servers/llamacpp_python/

 - name: Set up Python
 uses: actions/setup-python@…
8 changes: 4 additions & 4 deletions model_servers/llamacpp_python/README.md
@@ -7,7 +7,7 @@ podman build -t playground .
 ```
 or
 ```bash
-make -f Containerfile build
+make -f Makefile build
 ```

 ### Download Model
@@ -26,7 +26,7 @@ cd ../
 ```
 or
 ```bash
-make -f Containerfile download
+make -f Makefile models/llama-2-7b-chat.Q5_K_S.gguf
 ```

 ### Deploy Model Service
@@ -46,7 +46,7 @@ podman run --rm -it -d \
 ```
 or
 ```bash
-make -f Containerfile run
+make -f Makefile run
 ```

 #### Multiple Model Service:
@@ -89,5 +89,5 @@ The environment is implemented with devcontainer technology.

 Running tests
 ```bash
-make -f Containerfile test
+make -f Makefile test
 ```
6 changes: 2 additions & 4 deletions model_servers/llamacpp_python/base/Containerfile
@@ -1,8 +1,6 @@
 FROM registry.access.redhat.com/ubi9/python-311:1-52
 WORKDIR /locallm
 COPY src .
-COPY requirements.txt /locallm/requirements.txt
-RUN pip install --no-cache-dir --verbose -r /locallm/requirements.txt
-COPY run.sh run.sh
+RUN pip install --no-cache-dir --verbose -r ./requirements.txt
 EXPOSE 8001
-ENTRYPOINT [ "sh", "run.sh" ]
+ENTRYPOINT [ "sh", "./run.sh" ]
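Since the only remaining `COPY` is `COPY src .`, `./requirements.txt` and `./run.sh` presumably ship inside the context's `src/` directory and land directly in `WORKDIR /locallm`. A quick sanity check of that layout after building (image name `playground` as in the README; a sketch only):

```bash
# List what ended up in /locallm inside the built image.
podman run --rm --entrypoint ls playground -la /locallm
```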
10 changes: 5 additions & 5 deletions model_servers/llamacpp_python/base/Makefile
@@ -5,14 +5,14 @@ build:
 models/llama-2-7b-chat.Q5_K_S.gguf:
 	curl -s -S -L -f https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q5_K_S.gguf -z $@ -o $@.tmp && mv -f $@.tmp $@ 2>/dev/null || rm -f $@.tmp $@

-.PHONY: download
-download:
-	pip install -r tests/requirements.txt
+.PHONY: install
+install:
+	pip install -r model_servers/llamacpp_python/base/tests/requirements.txt

 .PHONY: run
-run: install models/llama-2-7b-chat.Q5_K_S.gguf
+run: models/llama-2-7b-chat.Q5_K_S.gguf install
 	podman run -it -d -p 8001:8001 -v ./models:/locallm/models:ro,Z -e MODEL_PATH=models/llama-2-7b-chat.Q5_K_S.gguf -e HOST=0.0.0.0 -e PORT=8001 --net=host ghcr.io/redhat-et/playground

 .PHONY: test
-test: models/llama-2-7b-chat.Q5_K_S.gguf download
+test: models/llama-2-7b-chat.Q5_K_S.gguf install
 	pytest --log-cli-level NOTSET
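With `download` renamed to `install`, both `run` and `test` now depend on the model file and on `install`, which pip-installs the test requirements. A minimal invocation sketch (assumes make, podman, curl, and pip are available and that the relative paths in the recipes resolve from the current directory):

```bash
# Fetch the model if it is not already present, install test deps,
# then start the service and run the tests against it.
make -f Makefile models/llama-2-7b-chat.Q5_K_S.gguf
make -f Makefile install
make -f Makefile run
make -f Makefile test
```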
