generated from CubeGPT/CubeAgents
-
Notifications
You must be signed in to change notification settings - Fork 5
/
core.py
177 lines (138 loc) · 4.89 KB
/
core.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
from openai import OpenAI
import chardet
import sys
import json
import locale
import os
from log_writer import logger
import config
def initialize():
    """
    Initializes the software.

    Logs the software launch (version number and platform) and, unless the
    bypass flag is set, rewrites any configured gpt-3.5 generation model to
    its gpt-4 counterpart, since gpt-3.5 is unreliable for code generation.

    Args:
        None

    Returns:
        None
    """
    try:
        locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
    except locale.Error:
        # en_US.UTF-8 is not installed on every system (common in minimal
        # containers); fall back to the process default instead of crashing.
        locale.setlocale(locale.LC_ALL, "")
    logger(f"Launch. Software version {config.VERSION_NUMBER}, platform {sys.platform}")
    if (
        "gpt-3.5" in config.GENERATION_MODEL
        and config.BYPASS_NO_GPT35_FOR_GENERATION_LIMIT is False
    ):
        print(
            "gpt-3.5 writes bugs *all the time* and is not recommended for code generation. Switching to gpt-4."
        )
        config.edit_config(
            "GENERATION_MODEL", config.GENERATION_MODEL.replace("gpt-3.5", "gpt-4")
        )
def askgpt(
    system_prompt: str,
    user_prompt: str,
    model_name: str,
    disable_json_mode: bool = False,
    image_url: str = None,
):
    """
    Interacts with ChatGPT using the specified prompts.

    Args:
        system_prompt (str): The system prompt.
        user_prompt (str): The user prompt.
        model_name (str): The model name to use.
        disable_json_mode (bool): Whether to disable JSON mode.
        image_url (str, optional): URL of an image to attach to the user
            message. When given, a separate vision API key/base URL may be
            used depending on configuration.

    Returns:
        str: The response from ChatGPT.
    """
    # Vision requests may go through a dedicated endpoint/key when configured.
    if image_url is not None and config.USE_DIFFERENT_APIKEY_FOR_VISION_MODEL:
        logger("Using different API key for vision model.")
        client = OpenAI(api_key=config.VISION_API_KEY, base_url=config.VISION_BASE_URL)
    else:
        client = OpenAI(api_key=config.API_KEY, base_url=config.BASE_URL)

    logger("Initialized the OpenAI client.")

    # Build the conversation; with an image the user content is multi-part.
    if image_url is not None:
        messages = [
            {"role": "system", "content": system_prompt},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": user_prompt},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            },
        ]
    else:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

    logger(f"askgpt: system {system_prompt}")
    logger(f"askgpt: user {user_prompt}")

    # JSON mode constrains the model to emit a single JSON object.
    if disable_json_mode:
        response = client.chat.completions.create(model=model_name, messages=messages)
    else:
        response = client.chat.completions.create(
            model=model_name, response_format={"type": "json_object"}, messages=messages
        )

    logger(f"askgpt: response {response}")

    # Extract the assistant's reply from the first (only) choice.
    assistant_reply = response.choices[0].message.content
    logger(f"askgpt: extracted reply {assistant_reply}")

    return assistant_reply
def response_to_action(msg):
    """
    Writes the code files contained in a ChatGPT JSON response to disk.

    Args:
        msg (str): The JSON response from ChatGPT. Must contain a "codes"
            list of {"file": <relative path>, "code": <file contents>} items,
            with paths using forward slashes.

    Returns:
        None
    """
    text = json.loads(msg)
    codes = text["codes"]
    for section in codes:
        file = section["file"]
        code = section["code"]

        # Normalize the forward-slash path from the model into an OS path.
        path = os.path.join(*file.split("/"))
        dir_path, _ = os.path.split(path)

        # Create parent directories if needed. A bare filename yields an
        # empty dir_path, which makedirs rejects, so skip it in that case.
        if dir_path:
            os.makedirs(dir_path, exist_ok=True)

        # Write the generated code. Explicit UTF-8 keeps non-ASCII content
        # portable regardless of the platform's default locale encoding.
        with open(path, "w", encoding="utf-8") as f:
            f.write(code)
def mixed_decode(text: str):
    """
    Decode a mixed text containing both normal text and a byte sequence.

    Args:
        text (str): The mixed text to be decoded.

    Returns:
        str: The decoded text, where the byte sequence has been converted to
        its corresponding characters. If no byte sequence is found, or its
        encoding cannot be detected, the input is returned unchanged.
    """
    # Split the normal text and the byte sequence.
    # Assuming the byte sequence is everything after the last colon and space ": "
    try:
        normal_text, byte_text = text.rsplit(": ", 1)
    except (TypeError, ValueError):
        # The text only contains normal text
        return text

    # latin1 maps byte values 0-255 directly onto the first 256 code points,
    # so this round-trips the raw bytes without loss.
    byte_sequence = byte_text.encode("latin1")

    # Detect the encoding of the byte sequence.
    detected_encoding = chardet.detect(byte_sequence)
    encoding = detected_encoding["encoding"]

    # chardet reports None for empty or undetectable input; decoding with
    # None would raise TypeError, so return the input unchanged instead.
    if encoding is None:
        return text

    # Decode the byte sequence and reattach it to the normal text.
    decoded_text = byte_sequence.decode(encoding)
    final_text = normal_text + ": " + decoded_text
    return final_text
# Entry-point guard: this module only provides helpers for console.py and
# is not meant to be executed directly.
if __name__ == "__main__":
    print("This script is not meant to be run directly. Please run console.py instead.")