diff --git a/chains.py b/chains.py index 9ad10f40..cc3c00b9 100644 --- a/chains.py +++ b/chains.py @@ -29,7 +29,7 @@ def load_embedding_model(embedding_model_name: str, logger=BaseLogger(), config={}): if embedding_model_name == "ollama": embeddings = OllamaEmbeddings( - base_url=config["ollama_base_url"], model="llama2" + base_url=config["ollama_base_url"], model="llama3" ) dimension = 4096 logger.info("Embedding: Using Ollama") diff --git a/docker-compose.yml b/docker-compose.yml index 7faafe52..6148c20f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -24,7 +24,7 @@ services: dockerfile: pull_model.Dockerfile environment: - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://host.docker.internal:11434} - - LLM=${LLM-llama2} + - LLM=${LLM-llama3} networks: - net tty: true @@ -105,7 +105,7 @@ services: - OPENAI_API_KEY=${OPENAI_API_KEY-} - GOOGLE_API_KEY=${GOOGLE_API_KEY-} - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://host.docker.internal:11434} - - LLM=${LLM-llama2} + - LLM=${LLM-llama3} - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer} - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"} - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false} @@ -144,7 +144,7 @@ services: - OPENAI_API_KEY=${OPENAI_API_KEY-} - GOOGLE_API_KEY=${GOOGLE_API_KEY-} - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://host.docker.internal:11434} - - LLM=${LLM-llama2} + - LLM=${LLM-llama3} - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer} - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"} - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false} @@ -185,7 +185,7 @@ services: - OPENAI_API_KEY=${OPENAI_API_KEY} - GOOGLE_API_KEY=${GOOGLE_API_KEY} - OLLAMA_BASE_URL=${OLLAMA_BASE_URL-http://host.docker.internal:11434} - - LLM=${LLM-llama2} + - LLM=${LLM-llama3} - EMBEDDING_MODEL=${EMBEDDING_MODEL-sentence_transformer} - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-"https://api.smith.langchain.com"} - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-false} 
diff --git a/env.example b/env.example index 88e33cc3..a7953ed7 100644 --- a/env.example +++ b/env.example @@ -1,15 +1,15 @@ #***************************************************************** # LLM and Embedding Model #***************************************************************** -LLM=llama2 #or any Ollama model tag, gpt-4, gpt-3.5, or claudev2 +LLM=llama3 #or any Ollama model tag, gpt-4, gpt-3.5, or claudev2 EMBEDDING_MODEL=sentence_transformer #or google-genai-embedding-001 openai, ollama, or aws #***************************************************************** # Neo4j #***************************************************************** -#NEO4J_URI=neo4j://database:7687 -#NEO4J_USERNAME=neo4j -#NEO4J_PASSWORD=password +NEO4J_URI=neo4j://database:7687 +NEO4J_USERNAME=neo4j +NEO4J_PASSWORD=password #***************************************************************** # Langchain diff --git a/readme.md b/readme.md index 28fba3d4..6d2cbdc5 100644 --- a/readme.md +++ b/readme.md @@ -14,7 +14,7 @@ Available variables: | NEO4J_URI | neo4j://database:7687 | REQUIRED - URL to Neo4j database | | NEO4J_USERNAME | neo4j | REQUIRED - Username for Neo4j database | | NEO4J_PASSWORD | password | REQUIRED - Password for Neo4j database | -| LLM | llama2 | REQUIRED - Can be any Ollama model tag, or gpt-4 or gpt-3.5 or claudev2 | +| LLM | llama3 | REQUIRED - Can be any Ollama model tag, or gpt-4 or gpt-3.5 or claudev2 | | EMBEDDING_MODEL | sentence_transformer | REQUIRED - Can be sentence_transformer, openai, aws, ollama or google-genai-embedding-001| | AWS_ACCESS_KEY_ID | | REQUIRED - Only if LLM=claudev2 or embedding_model=aws | | AWS_SECRET_ACCESS_KEY | | REQUIRED - Only if LLM=claudev2 or embedding_model=aws | diff --git a/running_on_wsl.md b/running_on_wsl.md index 473ec50e..439df724 100644 --- a/running_on_wsl.md +++ b/running_on_wsl.md @@ -29,10 +29,10 @@ After the activation enter into the WSL with the command `wsl` and type `docker` - To list the downloaded model: `ollama 
list`. This command could lead to: ```sh NAME ID SIZE MODIFIED - llama2:latest 7da22eda89ac 3.8 GB 22 minutes ago + llama3:latest 365c0bd3c000 4.7 GB 22 minutes ago ``` - - (OPTIONAL) To remove model: `ollama rm llama2` - - To run the ollama on WSL: `ollama run llama2` + - (OPTIONAL) To remove model: `ollama rm llama3` + - To run the ollama on WSL: `ollama run llama3` 3. clone the repo 4. cd into the repo and enter wsl @@ -40,4 +40,4 @@ After the activation enter into the WSL with the command `wsl` and type `docker` # Run the stack on MAC: -On MAC you can follow this tutorial: https://collabnix.com/getting-started-with-genai-stack-powered-with-docker-langchain-neo4j-and-ollama/ \ No newline at end of file +On MAC you can follow this tutorial: https://collabnix.com/getting-started-with-genai-stack-powered-with-docker-langchain-neo4j-and-ollama/