# debugger_no_modal.py (forked from smol-ai/developer)
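"""Command-line debugging helper: walks a project directory, bundles the
contents of every non-image file into one prompt, and asks an OpenAI chat model
to suggest what might be wrong and which files to fix."""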
import argparse
import os
from time import sleep

from constants import DEFAULT_DIR, DEFAULT_MODEL, DEFAULT_MAX_TOKENS, EXTENSION_TO_SKIP


def read_file(filename):
    with open(filename, "r") as file:
        return file.read()

def walk_directory(directory):
    """Return a dict mapping each non-image file's path (relative to
    `directory`) to its text contents."""
    image_extensions = [
        ".png",
        ".jpg",
        ".jpeg",
        ".gif",
        ".bmp",
        ".svg",
        ".ico",
        ".tif",
        ".tiff",
    ]
    code_contents = {}
    for root, dirs, files in os.walk(directory):
        for file in files:
            if not any(file.endswith(ext) for ext in image_extensions):
                # Build the relative path outside the try block so it is always
                # defined when we record a read error below.
                relative_filepath = os.path.relpath(os.path.join(root, file), directory)
                try:
                    code_contents[relative_filepath] = read_file(os.path.join(root, file))
                except Exception as e:
                    code_contents[relative_filepath] = f"Error reading file {file}: {str(e)}"
    return code_contents

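# Example of the mapping returned by walk_directory (hypothetical paths and
# contents, for illustration only):
#   {"app.py": "import flask\n...", "templates/index.html": "<html>...</html>"}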

def main(args):
    prompt = args.prompt
    directory = args.directory
    model = args.model
    code_contents = walk_directory(directory)

    # `code_contents` maps each non-image file path to its contents; join it
    # into a single context string to send to the chat model.
    context = "\n".join(
        f"{path}:\n{contents}" for path, contents in code_contents.items()
    )
    system = (
        "You are an AI debugger who is trying to debug a program for a user based on "
        "their file system. The user has provided you with the following files and "
        "their contents, finally followed by the error message or issue they are facing."
    )
    prompt = (
        "My files are as follows: "
        + context
        + "\n\n"
        + "My issue is as follows: "
        + prompt
    )
    prompt += (
        "\n\nGive me ideas for what could be wrong and what fixes to do in which files."
    )
    res = generate_response(system, prompt, model)
    # Print the model's reply in teal.
    print("\033[96m" + res + "\033[0m")

def generate_response(system_prompt, user_prompt, model=DEFAULT_MODEL, *args):
    # Note: this uses the legacy openai-python (<1.0) ChatCompletion interface.
    import openai

    # Set up your OpenAI API credentials.
    openai.api_key = os.environ["OPENAI_API_KEY"]

    messages = []
    messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": user_prompt})

    # Loop through each extra arg and add it to messages, alternating the role
    # between "assistant" and "user".
    role = "assistant"
    for value in args:
        messages.append({"role": role, "content": value})
        role = "user" if role == "assistant" else "assistant"

    params = {
        "model": model,
        "messages": messages,
        "max_tokens": 1500,
        "temperature": 0,
    }

    # Send the API request, retrying on failure (e.g. when the API is too busy,
    # we don't want to fail everything).
    keep_trying = True
    while keep_trying:
        try:
            response = openai.ChatCompletion.create(**params)
            keep_trying = False
        except Exception as e:
            print("Failed to generate response. Error: ", e)
            sleep(30)
            print("Retrying...")

    # Get the reply from the API response.
    reply = response.choices[0]["message"]["content"]
    return reply

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "prompt",
        help="The prompt to send to the AI: the error message or issue you are facing.",
    )
    parser.add_argument(
        "--directory",
        "-d",
        help="The directory containing the files you want to debug.",
        default=DEFAULT_DIR,
    )
    parser.add_argument(
        "--model",
        "-m",
        help="The model ID of the OpenAI model to use.",
        default=DEFAULT_MODEL,
    )
    args = parser.parse_args()
    main(args)
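# Example invocation (the issue text and model name below are illustrative;
# DEFAULT_DIR and DEFAULT_MODEL come from constants.py):
#   export OPENAI_API_KEY=sk-...
#   python debugger_no_modal.py "TypeError: 'NoneType' object is not subscriptable" -d ./my_project -m gpt-3.5-turbo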