From 86b61e31deb611ec5a5f90478e82620c20e2dbee Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Mon, 4 Nov 2024 20:31:27 -0800
Subject: [PATCH] `--model` support
---
computer_use/cli.py | 55 ++++--
computer_use/loop.py | 413 ++++++++++++++++++++++++++-----------------
poetry.lock | 32 ++--
3 files changed, 316 insertions(+), 184 deletions(-)
diff --git a/computer_use/cli.py b/computer_use/cli.py
index 814ce6f79..37424bdb8 100644
--- a/computer_use/cli.py
+++ b/computer_use/cli.py
@@ -1,8 +1,12 @@
+import importlib.util
import json
+import os
import random
import sys
import time
+import platformdirs
+
from .loop import run_async_main
from .ui.edit import CodeStreamView
@@ -34,23 +38,26 @@ def help_message():
"\033[38;5;240mTip: Type `wtf` in your terminal to fix the last error\033[0m",
"\033[38;5;240mTip: Your terminal is a chatbox. Type `i want to...`\033[0m",
]
+ BLUE_COLOR = "\033[94m"
+ RESET_COLOR = "\033[0m"
+
content = f"""
A standard interface for computer-controlling agents.
-Run \033[34minterpreter\033[0m or \033[34mi [prompt]\033[0m to begin.
+Run {BLUE_COLOR}interpreter{RESET_COLOR} or {BLUE_COLOR}i [prompt]{RESET_COLOR} to begin.
-\033[34m--gui\033[0m Enable display, mouse, and keyboard control
-\033[34m--model\033[0m Specify language model or OpenAI-compatible URL
-\033[34m--serve\033[0m Start an OpenAI-compatible server at \033[34m/\033[0m
+{BLUE_COLOR}--gui{RESET_COLOR} Enable display, mouse, and keyboard control
+{BLUE_COLOR}--model{RESET_COLOR} Specify language model or OpenAI-compatible URL
+{BLUE_COLOR}--serve{RESET_COLOR} Start an OpenAI-compatible server at {BLUE_COLOR}/{RESET_COLOR}
-\033[34m-y\033[0m Automatically approve tools
-\033[34m-d\033[0m Run in debug mode
+{BLUE_COLOR}-y{RESET_COLOR} Automatically approve tools
+{BLUE_COLOR}-d{RESET_COLOR} Run in debug mode
Examples:
-\033[34mi need help with my code\033[0m
-\033[34mi --model gpt-4o-mini --serve\033[0m
-\033[34mi --model https://localhost:1234/v1\033[0m
+{BLUE_COLOR}i need help with my code{RESET_COLOR}
+{BLUE_COLOR}i --model gpt-4o-mini --serve{RESET_COLOR}
+{BLUE_COLOR}i --model https://localhost:1234/v1{RESET_COLOR}
{random.choice(tips)}
""".strip()
@@ -68,11 +75,39 @@ def help_message():
time.sleep(0.03)
print("")
time.sleep(0.04)
- print("\033[38;5;238mA.C., 2024. https://openinterpreter.com/\033[0m\n")
+ print("\033[38;5;238mhttps://openinterpreter.com/\033[0m\n")
time.sleep(0.05)
def main():
+ oi_dir = platformdirs.user_config_dir("open-interpreter")
+ profiles_dir = os.path.join(oi_dir, "profiles")
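+    # Profiles are Python files kept under the user's open-interpreter config dir.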
+
+ # Get profile path from command line args
+ profile = None
+ for i, arg in enumerate(sys.argv):
+ if arg == "--profile" and i + 1 < len(sys.argv):
+ profile = sys.argv[i + 1]
+ break
+
+ if profile:
+ if not os.path.isfile(profile):
+ profile = os.path.join(profiles_dir, profile)
+ if not os.path.isfile(profile):
+ profile += ".py"
+ if not os.path.isfile(profile):
+ print(f"Invalid profile path: {profile}")
+ exit(1)
+
+ # Load the profile module from the provided path
+ spec = importlib.util.spec_from_file_location("profile", profile)
+ profile_module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(profile_module)
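+        # Note: this executes the profile file as arbitrary Python at startup.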
+
+ # Get the interpreter from the profile
+ interpreter = profile_module.interpreter
+
if len(sys.argv) > 1 and sys.argv[1] == "--help":
help_message()
else:
diff --git a/computer_use/loop.py b/computer_use/loop.py
index 82eda7d82..2a5ba6311 100755
--- a/computer_use/loop.py
+++ b/computer_use/loop.py
@@ -3,6 +3,7 @@
"""
import asyncio
+import dataclasses
import json
import os
import platform
@@ -57,10 +58,12 @@
from .tools import BashTool, ComputerTool, EditTool, ToolCollection, ToolResult
from .ui.edit import CodeStreamView
-model_choice = "claude-3.5-sonnet"
+model_choice = "claude-3-5-sonnet-20241022"
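+# `--model` accepts any litellm-routable model string or an OpenAI-compatible URL.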
if "--model" in sys.argv and sys.argv[sys.argv.index("--model") + 1]:
model_choice = sys.argv[sys.argv.index("--model") + 1]
+import litellm
+
md = MarkdownStreamer()
COMPUTER_USE_BETA_FLAG = "computer-use-2024-10-22"
@@ -196,7 +199,172 @@ async def sampling_loop(
    # implementation may be able to call the SDK directly with:
# `response = client.messages.create(...)` instead.
- if model_choice == "gpt-4o":
+        try:
+            use_anthropic = (
+                litellm.get_model_info(model_choice)["litellm_provider"]
+                == "anthropic"
+            )
+        except Exception:
+            # Model strings litellm cannot look up (e.g. OpenAI-compatible
+            # URLs) are routed through the litellm branch below.
+            use_anthropic = False
+
+ if use_anthropic:
+ # Use Anthropic API which supports betas
+ raw_response = client.beta.messages.create(
+ max_tokens=max_tokens,
+ messages=messages,
+ model=model,
+ system=system["text"],
+ tools=tool_collection.to_params(),
+ betas=betas,
+ stream=True,
+ )
+
+ response_content = []
+ current_block = None
+
+ for chunk in raw_response:
+ if isinstance(chunk, BetaRawContentBlockStartEvent):
+ current_block = chunk.content_block
+ elif isinstance(chunk, BetaRawContentBlockDeltaEvent):
+ if chunk.delta.type == "text_delta":
+ # print(f"{chunk.delta.text}", end="", flush=True)
+ md.feed(chunk.delta.text)
+ yield {"type": "chunk", "chunk": chunk.delta.text}
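+                        # Yield to the event loop so each chunk flushes downstream immediately.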
+ await asyncio.sleep(0)
+ if current_block and current_block.type == "text":
+ current_block.text += chunk.delta.text
+ elif chunk.delta.type == "input_json_delta":
+ # Initialize partial_json if needed
+ if not hasattr(current_block, "partial_json"):
+ current_block.partial_json = ""
+ current_block.parsed_json = {}
+ current_block.current_key = None
+ current_block.current_value = ""
+
+ # Add new JSON delta
+ current_block.partial_json += chunk.delta.partial_json
+
+ # print(chunk.delta.partial_json)
+
+ # If name attribute is present on current_block:
+ if hasattr(current_block, "name"):
+                            if edit.name is None:
+ edit.name = current_block.name
+ edit.feed(chunk.delta.partial_json)
+
+ elif isinstance(chunk, BetaRawContentBlockStopEvent):
+ edit.close()
+ edit = CodeStreamView()
+ if current_block:
+ if hasattr(current_block, "partial_json"):
+ # Finished a tool call
+ # print()
+ current_block.input = json.loads(current_block.partial_json)
+ # yield {"type": "chunk", "chunk": current_block.input}
+ delattr(current_block, "partial_json")
+ else:
+ # Finished a message
+ # print("\n")
+ md.feed("\n")
+ yield {"type": "chunk", "chunk": "\n"}
+ await asyncio.sleep(0)
+ # Clean up any remaining attributes from partial processing
+ if current_block:
+ for attr in [
+ "partial_json",
+ "parsed_json",
+ "current_key",
+ "current_value",
+ ]:
+ if hasattr(current_block, attr):
+ delattr(current_block, attr)
+ response_content.append(current_block)
+ current_block = None
+
+ response = BetaMessage(
+ id=str(uuid.uuid4()),
+ content=response_content,
+ role="assistant",
+ model=model,
+ stop_reason=None,
+ stop_sequence=None,
+ type="message",
+ usage={
+ "input_tokens": 0,
+ "output_tokens": 0,
+ }, # Add a default usage dictionary
+ )
+
+ messages.append(
+ {
+ "role": "assistant",
+ "content": cast(list[BetaContentBlockParam], response.content),
+ }
+ )
+
+ user_approval = None
+
+ if "-y" in sys.argv or "--yes" in sys.argv: # or "--os" in sys.argv:
+ user_approval = "y"
+ else:
+ # If not in terminal, break
+ if not sys.stdin.isatty():
+ # Error out
+ print(
+                    "Error: You appear to be running in a non-interactive environment, so tools cannot be approved. Pass the `-y` flag to approve tools automatically in non-interactive environments."
+ )
+ # Exit
+ exit(1)
+
+ content_blocks = cast(list[BetaContentBlock], response.content)
+ tool_use_blocks = [b for b in content_blocks if b.type == "tool_use"]
+ if len(tool_use_blocks) > 1:
+                print("\n\033[38;5;240mRun actions above?\033[0m")
+                user_approval = input("\n(y/n): ").lower().strip()
+            elif len(tool_use_blocks) == 1:
+                print("\n\033[38;5;240mRun tool?\033[0m")
+ # print(
+ # f"\n\033[38;5;240mRun tool \033[0m\033[1m{tool_use_blocks[0].name}\033[0m?"
+ # )
+ user_approval = input("\n(y/n): ").lower().strip()
+ print()
+
+ tool_result_content: list[BetaToolResultBlockParam] = []
+ for content_block in cast(list[BetaContentBlock], response.content):
+ output_callback(content_block)
+ if content_block.type == "tool_use":
+ # Ask user if they want to create the file
+ # path = "/tmp/test_file.txt"
+ # print(f"\n\033[38;5;240m Create \033[0m\033[1m{path}\033[0m?")
+ # response = input(f"\n\033[38;5;240m Create \033[0m\033[1m{path}\033[0m?" + " (y/n): ").lower().strip()
+ # Ask user for confirmation before running tool
+ edit.close()
+
+ if user_approval == "y":
+ result = await tool_collection.run(
+ name=content_block.name,
+ tool_input=cast(dict[str, Any], content_block.input),
+ )
+ else:
+ result = ToolResult(output="Tool execution cancelled by user")
+ tool_result_content.append(
+ _make_api_tool_result(result, content_block.id)
+ )
+ tool_output_callback(result, content_block.id)
+
+ if user_approval == "n":
+ messages.append({"content": tool_result_content, "role": "user"})
+ yield {"type": "messages", "messages": messages}
+ break
+
+ if not tool_result_content:
+ # Done!
+ yield {"type": "messages", "messages": messages}
+ break
+
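+        # Anthropic expects tool results inside a "user" turn, while
+        # OpenAI-style APIs use the dedicated "tool" role.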
+ if use_anthropic:
+ messages.append({"content": tool_result_content, "role": "user"})
+ else:
+ messages.append({"content": tool_result_content, "role": "tool"})
+
+ else:
+            # Use litellm for every other provider or an OpenAI-compatible URL
tools = [
{
"type": "function",
@@ -216,173 +384,95 @@ async def sampling_loop(
},
}
]
- from openai import OpenAI
- client = OpenAI()
- raw_response = client.chat.completions.create(
- model="gpt-4o",
- messages=[{"role": "system", "content": system["text"]}] + messages,
- tools=tools,
- stream=True,
- max_tokens=max_tokens,
- )
- else:
- raw_response = client.beta.messages.create(
- max_tokens=max_tokens,
- messages=messages,
- model=model,
- system=system["text"],
- tools=tool_collection.to_params(),
- betas=betas,
- stream=True,
- )
+ params = {
+ "model": model_choice,
+ "messages": [{"role": "system", "content": system["text"]}] + messages,
+ "tools": tools,
+ "stream": True,
+ "max_tokens": max_tokens,
+ }
- response_content = []
- current_block = None
-
- for chunk in raw_response:
- # chunk = chunk.choices[0]
- # # time.sleep(5)
- if isinstance(chunk, BetaRawContentBlockStartEvent):
- current_block = chunk.content_block
- elif isinstance(chunk, BetaRawContentBlockDeltaEvent):
- if chunk.delta.type == "text_delta":
- # print(f"{chunk.delta.text}", end="", flush=True)
- md.feed(chunk.delta.text)
- yield {"type": "chunk", "chunk": chunk.delta.text}
+ raw_response = litellm.completion(**params)
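+            # litellm normalizes every provider to OpenAI-style streaming
+            # chunks, so the loop below handles a single response shape.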
+
+ message = None
+
+ for chunk in raw_response:
+                # The first streamed delta doubles as the accumulator for the
+                # complete assistant message.
+                if message is None:
+                    message = chunk.choices[0].delta
+
+ if chunk.choices[0].delta.content:
+ md.feed(chunk.choices[0].delta.content)
+ yield {"type": "chunk", "chunk": chunk.choices[0].delta.content}
await asyncio.sleep(0)
- if current_block and current_block.type == "text":
- current_block.text += chunk.delta.text
- elif chunk.delta.type == "input_json_delta":
- # Initialize partial_json if needed
- if not hasattr(current_block, "partial_json"):
- current_block.partial_json = ""
- current_block.parsed_json = {}
- current_block.current_key = None
- current_block.current_value = ""
-
- # Add new JSON delta
- current_block.partial_json += chunk.delta.partial_json
-
- # print(chunk.delta.partial_json)
-
- # If name attribute is present on current_block:
- if hasattr(current_block, "name"):
- if edit.name == None:
- edit.name = current_block.name
- edit.feed(chunk.delta.partial_json)
-
- elif isinstance(chunk, BetaRawContentBlockStopEvent):
- edit.close()
- edit = CodeStreamView()
- if current_block:
- if hasattr(current_block, "partial_json"):
- # Finished a tool call
- # print()
- current_block.input = json.loads(current_block.partial_json)
- # yield {"type": "chunk", "chunk": current_block.input}
- delattr(current_block, "partial_json")
- else:
- # Finished a message
- # print("\n")
- md.feed("\n")
- yield {"type": "chunk", "chunk": "\n"}
- await asyncio.sleep(0)
- # Clean up any remaining attributes from partial processing
- if current_block:
- for attr in [
- "partial_json",
- "parsed_json",
- "current_key",
- "current_value",
- ]:
- if hasattr(current_block, attr):
- delattr(current_block, attr)
- response_content.append(current_block)
- current_block = None
-
- response = BetaMessage(
- id=str(uuid.uuid4()),
- content=response_content,
- role="assistant",
- model=model,
- stop_reason=None,
- stop_sequence=None,
- type="message",
- usage={
- "input_tokens": 0,
- "output_tokens": 0,
- }, # Add a default usage dictionary
- )
- messages.append(
- {
- "role": "assistant",
- "content": cast(list[BetaContentBlockParam], response.content),
- }
- )
+                    # When the delta is the accumulator itself (first chunk),
+                    # its content is already stored; only add later chunks.
+                    if chunk.choices[0].delta != message:
+                        if message.content is None:
+                            message.content = ""
+                        message.content += chunk.choices[0].delta.content
+ if chunk.choices[0].delta.tool_calls:
+ if chunk.choices[0].delta.tool_calls[0].id:
+                        if message.tool_calls is None:
+                            message.tool_calls = []
+                        if chunk.choices[0].delta.tool_calls[0].id not in [
+                            t.id for t in message.tool_calls
+                        ]:
+                            edit.close()
+                            edit = CodeStreamView()
+                            message.tool_calls.append(
+                                chunk.choices[0].delta.tool_calls[0]
+                            )
+ current_tool_call = [
+ t
+ for t in message.tool_calls
+ if t.id == chunk.choices[0].delta.tool_calls[0].id
+ ][0]
+
+ if chunk.choices[0].delta.tool_calls[0].function.name:
+ tool_name = chunk.choices[0].delta.tool_calls[0].function.name
+                        if edit.name is None:
+                            edit.name = tool_name
+                        if current_tool_call.function.name is None:
+                            current_tool_call.function.name = tool_name
+ if chunk.choices[0].delta.tool_calls[0].function.arguments:
+ arguments_delta = (
+ chunk.choices[0].delta.tool_calls[0].function.arguments
+ )
+ edit.feed(arguments_delta)
+
+                        # When the delta is the accumulator itself (first chunk),
+                        # these arguments are already stored; only add later chunks.
+                        if chunk.choices[0].delta != message:
+                            if current_tool_call.function.arguments is None:
+                                current_tool_call.function.arguments = ""
+                            current_tool_call.function.arguments += arguments_delta
+
+ if chunk.choices[0].finish_reason:
+ edit.close()
+ edit = CodeStreamView()
+
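+            # The accumulated delta is now the complete assistant message
+            # (text plus tool calls).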
+ messages.append(message)
+
+ if not message.tool_calls:
+ yield {"type": "messages", "messages": messages}
+ break
- user_approval = None
+            if "-y" in sys.argv or "--yes" in sys.argv:
+                user_approval = "y"
+            else:
+                user_approval = input("\nRun tool(s)? (y/n): ").lower().strip()
- if "-y" in sys.argv or "--yes" in sys.argv: # or "--os" in sys.argv:
- user_approval = "y"
- else:
- # If not in terminal, break
- if not sys.stdin.isatty():
- # Error out
- print(
- "Error: You appear to be running in a non-interactive environment, so cannot approve tools. Add the `-y` flag to automatically approve tools in non-interactive environments."
- )
- # Exit
- exit(1)
-
- content_blocks = cast(list[BetaContentBlock], response.content)
- tool_use_blocks = [b for b in content_blocks if b.type == "tool_use"]
- if len(tool_use_blocks) > 1:
- print(f"\n\033[38;5;240mRun actions above\033[0m?")
- user_approval = input("\n(y/n): ").lower().strip()
- elif len(tool_use_blocks) == 1:
- print(f"\n\033[38;5;240mRun tool?\033[0m")
- # print(
- # f"\n\033[38;5;240mRun tool \033[0m\033[1m{tool_use_blocks[0].name}\033[0m?"
- # )
- user_approval = input("\n(y/n): ").lower().strip()
- print()
-
- tool_result_content: list[BetaToolResultBlockParam] = []
- for content_block in cast(list[BetaContentBlock], response.content):
- output_callback(content_block)
- if content_block.type == "tool_use":
- # Ask user if they want to create the file
- # path = "/tmp/test_file.txt"
- # print(f"\n\033[38;5;240m Create \033[0m\033[1m{path}\033[0m?")
- # response = input(f"\n\033[38;5;240m Create \033[0m\033[1m{path}\033[0m?" + " (y/n): ").lower().strip()
- # Ask user for confirmation before running tool
- edit.close()
+ for tool_call in message.tool_calls:
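+                # Arguments arrive as a JSON string; decode before dispatching to the tool.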
+ function_arguments = json.loads(tool_call.function.arguments)
if user_approval == "y":
result = await tool_collection.run(
- name=content_block.name,
- tool_input=cast(dict[str, Any], content_block.input),
+ name=tool_call.function.name,
+ tool_input=cast(dict[str, Any], function_arguments),
)
else:
result = ToolResult(output="Tool execution cancelled by user")
- tool_result_content.append(
- _make_api_tool_result(result, content_block.id)
- )
- tool_output_callback(result, content_block.id)
-
- if user_approval == "n":
- messages.append({"content": tool_result_content, "role": "user"})
- yield {"type": "messages", "messages": messages}
- break
- if not tool_result_content:
- # Done!
- yield {"type": "messages", "messages": messages}
- break
-
- messages.append({"content": tool_result_content, "role": "user"})
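+            # OpenAI-style APIs expect each tool result as its own
+            # `role: "tool"` message linked back via `tool_call_id`.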
+ messages.append(
+ {
+ "role": "tool",
+ "content": json.dumps(dataclasses.asdict(result)),
+ "tool_call_id": tool_call.id,
+ }
+ )
def _maybe_filter_to_n_most_recent_images(
@@ -664,8 +754,13 @@ def new_welcome():
while not exit_flag:
# If is atty, get input from user
+        placeholder_color = "ansigray"
+
if sys.stdin.isatty():
- placeholder = HTML('Use """ for multi-line prompts')
+            placeholder = HTML(
+                f'<{placeholder_color}>Use """ for multi-line prompts</{placeholder_color}>'
+            )
# placeholder = HTML(' Send a message (/? for help)')
session = PromptSession()
# Initialize empty message for multi-line input
@@ -678,7 +773,9 @@ def new_welcome():
# Check if starting multi-line input
if first_line.strip() == '"""':
while True:
- placeholder = HTML('Use """ again to finish')
+                    placeholder = HTML(
+                        f'<{placeholder_color}>Use """ again to finish</{placeholder_color}>'
+                    )
line = await session.prompt_async("", placeholder=placeholder)
if line.strip().endswith('"""'):
break
diff --git a/poetry.lock b/poetry.lock
index c9c3fd4b8..ad6915568 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1049,13 +1049,13 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth
[[package]]
name = "face"
-version = "22.0.0"
+version = "24.0.0"
description = "A command-line application framework (and CLI parser). Friendly for users, full-featured for developers."
optional = true
python-versions = "*"
files = [
- {file = "face-22.0.0-py3-none-any.whl", hash = "sha256:344fe31562d0f6f444a45982418f3793d4b14f9abb98ccca1509d22e0a3e7e35"},
- {file = "face-22.0.0.tar.gz", hash = "sha256:d5d692f90bc8f5987b636e47e36384b9bbda499aaf0a77aa0b0bbe834c76923d"},
+ {file = "face-24.0.0-py3-none-any.whl", hash = "sha256:0e2c17b426fa4639a4e77d1de9580f74a98f4869ba4c7c8c175b810611622cd3"},
+ {file = "face-24.0.0.tar.gz", hash = "sha256:611e29a01ac5970f0077f9c577e746d48c082588b411b33a0dd55c4d872949f6"},
]
[package.dependencies]
@@ -2447,13 +2447,13 @@ test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"]
[[package]]
name = "litellm"
-version = "1.51.2"
+version = "1.51.3"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
files = [
- {file = "litellm-1.51.2-py3-none-any.whl", hash = "sha256:a8469d8b037a61fd7ba5041b2c42b7f1ba433d4953081b86c9916b6dd220e51c"},
- {file = "litellm-1.51.2.tar.gz", hash = "sha256:a86e524b5cf9ad2863d835adb81fd92afd206a190c8629f3e86f652b5f405efa"},
+ {file = "litellm-1.51.3-py3-none-any.whl", hash = "sha256:440d3c7cc5ab8eeb12cee8f4d806bff05b7db834ebc11117d7fa070a1142ced5"},
+ {file = "litellm-1.51.3.tar.gz", hash = "sha256:31eff9fcbf7b058bac0fd7432c4ea0487e8555f12446a1f30e5862e33716f44d"},
]
[package.dependencies]
@@ -3112,13 +3112,13 @@ files = [
[[package]]
name = "openai"
-version = "1.53.0"
+version = "1.54.0"
description = "The official Python library for the openai API"
optional = false
-python-versions = ">=3.7.1"
+python-versions = ">=3.8"
files = [
- {file = "openai-1.53.0-py3-none-any.whl", hash = "sha256:20f408c32fc5cb66e60c6882c994cdca580a5648e10045cd840734194f033418"},
- {file = "openai-1.53.0.tar.gz", hash = "sha256:be2c4e77721b166cce8130e544178b7d579f751b4b074ffbaade3854b6f85ec5"},
+ {file = "openai-1.54.0-py3-none-any.whl", hash = "sha256:24ed8874b56e919f0fbb80b7136c3fb022dc82ce9f5f21579b7b280ea4bba249"},
+ {file = "openai-1.54.0.tar.gz", hash = "sha256:df2a84384314165b706722a7ac8988dc33eba20dd7fc3b939d138110e608b1ce"},
]
[package.dependencies]
@@ -3153,8 +3153,8 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
- {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
{version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
+ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
{version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
{version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
{version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""},
@@ -3179,8 +3179,8 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
- {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
{version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""},
+ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
{version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
{version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
{version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""},
@@ -6976,8 +6976,8 @@ files = [
[package.dependencies]
Pillow = [
- {version = ">=9.2.0", markers = "python_version == \"3.10\" or python_version == \"3.9\""},
{version = ">=9.3.0", markers = "python_version == \"3.11\""},
+ {version = ">=9.2.0", markers = "python_version == \"3.10\" or python_version == \"3.9\""},
]
[[package]]
@@ -7502,13 +7502,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""}
[[package]]
name = "readchar"
-version = "4.2.0"
+version = "4.2.1"
description = "Library to easily read single chars and key strokes"
optional = false
python-versions = ">=3.8"
files = [
- {file = "readchar-4.2.0-py3-none-any.whl", hash = "sha256:2a587a27c981e6d25a518730ad4c88c429c315439baa6fda55d7a8b3ac4cb62a"},
- {file = "readchar-4.2.0.tar.gz", hash = "sha256:44807cbbe377b72079fea6cba8aa91c809982d7d727b2f0dbb2d1a8084914faa"},
+ {file = "readchar-4.2.1-py3-none-any.whl", hash = "sha256:a769305cd3994bb5fa2764aa4073452dc105a4ec39068ffe6efd3c20c60acc77"},
+ {file = "readchar-4.2.1.tar.gz", hash = "sha256:91ce3faf07688de14d800592951e5575e9c7a3213738ed01d394dcc949b79adb"},
]
[[package]]