diff --git a/chatbot/ai-lab.yaml b/chatbot/ai-lab.yaml
index d05912f5..6f0bbcc8 100644
--- a/chatbot/ai-lab.yaml
+++ b/chatbot/ai-lab.yaml
@@ -6,7 +6,7 @@ application:
   containers:
     - name: llamacpp-server
       contextdir: ../model_servers/llamacpp_python
-      containerfile: ../model_servers/llamacpp_python/base/Containerfile
+      containerfile: ./base/Containerfile
       model-service: true
       backend:
         - llama
diff --git a/code-generation/ai-lab.yaml b/code-generation/ai-lab.yaml
index 74d7b628..e18b075e 100644
--- a/code-generation/ai-lab.yaml
+++ b/code-generation/ai-lab.yaml
@@ -6,7 +6,7 @@ application:
   containers:
     - name: llamacpp-server
       contextdir: ../model_servers/llamacpp_python
-      containerfile: ../model_servers/llamacpp_python/base/Containerfile
+      containerfile: ./base/Containerfile
       model-service: true
       backend:
         - llama
diff --git a/image_understanding/ai-lab.yaml b/image_understanding/ai-lab.yaml
index 6d35b5e6..76e7ac96 100644
--- a/image_understanding/ai-lab.yaml
+++ b/image_understanding/ai-lab.yaml
@@ -5,7 +5,7 @@ application:
   containers:
     - name: llamacpp-server
       contextdir: ../model_servers/llamacpp_python
-      containerfile: ../model_servers/llamacpp_python/base/Containerfile
+      containerfile: ./base/Containerfile
       model-service: true
       backend:
         - llama
diff --git a/rag/ai-lab.yaml b/rag/ai-lab.yaml
index b5326523..599716f1 100644
--- a/rag/ai-lab.yaml
+++ b/rag/ai-lab.yaml
@@ -6,7 +6,7 @@ application:
   containers:
     - name: llamacpp-server
       contextdir: ../model_servers/llamacpp_python
-      containerfile: ../model_servers/llamacpp_python/base/Containerfile
+      containerfile: ./base/Containerfile
      model-service: true
       backend:
         - llama
diff --git a/summarizer-langchain/ai-lab.yaml b/summarizer-langchain/ai-lab.yaml
index 1ccb58f5..3062a51f 100644
--- a/summarizer-langchain/ai-lab.yaml
+++ b/summarizer-langchain/ai-lab.yaml
@@ -6,7 +6,7 @@ application:
   containers:
     - name: llamacpp-server
       contextdir: ../model_servers/llamacpp_python
-      containerfile: ../model_servers/llamacpp_python/base/Containerfile
+      containerfile: ./base/Containerfile
       model-service: true
       backend:
         - llama