
Local mode fixes
tyfiero committed Mar 23, 2024
1 parent 6d0e885 commit 1190dcc
Showing 4 changed files with 141 additions and 1 deletion.
2 changes: 1 addition & 1 deletion software/poetry.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions software/pyproject.toml
@@ -32,6 +32,7 @@ open-interpreter = {extras = ["os"], version = "^0.2.3"}
dateparser = "^1.2.0"
pytimeparse = "^1.1.8"
python-crontab = "^3.0.0"
inquirer = "^3.2.4"

[build-system]
requires = ["poetry-core"]
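
The new `inquirer` dependency powers the interactive provider picker introduced in `local_mode.py` below. As a minimal, illustrative sketch of the list-prompt pattern it enables (the prompt text and choices here are placeholders, not copied from the commit):

import inquirer

# Single-choice list prompt; inquirer.prompt returns None if the user aborts (e.g. Ctrl+C).
questions = [
    inquirer.List(
        "provider",
        message="Which local model provider would you like to use?",
        choices=["Ollama", "LM Studio"],
    ),
]
answers = inquirer.prompt(questions)
if answers:
    print(f"Selected provider: {answers['provider']}")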
137 changes: 137 additions & 0 deletions software/source/server/utils/local_mode.py
@@ -0,0 +1,137 @@
import sys
import os
import platform
import subprocess
import time
import inquirer
from interpreter import interpreter


def select_local_model():

    # START OF LOCAL MODEL PROVIDER LOGIC
    interpreter.display_message("> 01 is compatible with several local model providers.\n")

    # Define the choices for local models
    choices = [
        "Ollama",
        "LM Studio",
        # "Jan",
    ]

    # Use inquirer to let the user select an option
    questions = [
        inquirer.List(
            "model",
            message="Which one would you like to use?",
            choices=choices,
        ),
    ]
    answers = inquirer.prompt(questions)

    selected_model = answers["model"]


    if selected_model == "LM Studio":
        interpreter.display_message(
            """
To use 01 with **LM Studio**, you will need to run **LM Studio** in the background.

1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/), then start it.
2. Select a language model then click **Download**.
3. Click the **<->** button on the left (below the chat button).
4. Select your model at the top, then click **Start Server**.

Once the server is running, you can begin your conversation below.
"""
        )
        time.sleep(1)

        interpreter.llm.api_base = "http://localhost:1234/v1"
        interpreter.llm.max_tokens = 1000
        interpreter.llm.context_window = 8000
        interpreter.llm.api_key = "x"

    elif selected_model == "Ollama":
        try:
            # List out all downloaded ollama models. Will fail if ollama isn't installed
            result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
            lines = result.stdout.split('\n')
            names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header

            # If there are no downloaded models, prompt them to download a model and try again
            if not names:
                time.sleep(1)

                interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")

                print("Please download a model then try again\n")
                time.sleep(2)
                sys.exit(1)

            # If there are models, prompt them to select one
            else:
                time.sleep(1)
                interpreter.display_message(f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")

                # Create a new inquirer selection from the names
                name_question = [
                    inquirer.List('name', message="Select a downloaded Ollama model", choices=names),
                ]
                name_answer = inquirer.prompt(name_question)
                selected_name = name_answer['name'] if name_answer else None

                # Set the model to the selected model
                interpreter.llm.model = f"ollama/{selected_name}"
                interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
                time.sleep(1)

        # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            print("Ollama is not installed or not recognized as a command.")
            time.sleep(1)
            interpreter.display_message(f"\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n")
            time.sleep(2)
            sys.exit(1)

    # elif selected_model == "Jan":
    #     interpreter.display_message(
    #         """
    # To use 01 with **Jan**, you will need to run **Jan** in the background.

    # 1. Download **Jan** from [https://jan.ai/](https://jan.ai/), then start it.
    # 2. Select a language model from the "Hub" tab, then click **Download**.
    # 3. Copy the ID of the model and enter it below.
    # 4. Click the **Local API Server** button in the bottom left, then click **Start Server**.

    # Once the server is running, enter the ID of the model below, then you can begin your conversation.
    # """
    #     )
    #     interpreter.llm.api_base = "http://localhost:1337/v1"
    #     interpreter.llm.max_tokens = 1000
    #     interpreter.llm.context_window = 3000
    #     time.sleep(1)

    #     # Prompt the user to enter the name of the model running on Jan
    #     model_name_question = [
    #         inquirer.Text('jan_model_name', message="Enter the id of the model you have running on Jan"),
    #     ]
    #     model_name_answer = inquirer.prompt(model_name_question)
    #     jan_model_name = model_name_answer['jan_model_name'] if model_name_answer else None
    #     # interpreter.llm.model = f"jan/{jan_model_name}"
    #     interpreter.llm.model = ""
    #     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
    #     time.sleep(1)

    # Set the system message to a minimal version for all local models.
    # Set offline for all local models
    interpreter.offline = True
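
For orientation, here is a rough sketch of how this helper might be exercised on its own, assuming open-interpreter's `interpreter.chat()` entry point; in the commit itself it is only called from start.py:

from interpreter import interpreter
from source.server.utils.local_mode import select_local_model

# Prompt for a local provider/model; the selection mutates the shared interpreter object.
select_local_model()

# After selection, fields such as interpreter.llm.model ("ollama/<name>" for Ollama) or
# interpreter.llm.api_base ("http://localhost:1234/v1" for LM Studio) are populated.
print(interpreter.llm.model, interpreter.llm.api_base)

# Send one quick test prompt through the newly configured local model.
interpreter.chat("Reply with one short sentence to confirm the local setup works.")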


2 changes: 2 additions & 0 deletions software/start.py
@@ -7,6 +7,7 @@
import importlib
from source.server.tunnel import create_tunnel
from source.server.server import main
from source.server.utils.local_mode import select_local_model

import signal
app = typer.Typer()
@@ -93,6 +94,7 @@ def _run(
        tts_service = "piper"
        # llm_service = "llamafile"
        stt_service = "local-whisper"
        select_local_model()

    if not server_url:
        server_url = f"{server_host}:{server_port}"
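
The local branch shown above is reached via a Typer command-line flag in `_run`; a minimal sketch of that gating pattern (the flag name and option wiring are assumptions, not the exact signature from start.py):

import typer

from source.server.utils.local_mode import select_local_model

app = typer.Typer()


@app.command()
def run(local: bool = typer.Option(False, "--local", help="Use fully local services")):
    if local:
        # Mirrors the commit: choose local STT/TTS services, then prompt
        # the user to pick a local LLM before the server starts.
        select_local_model()


if __name__ == "__main__":
    app()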
