-
Notifications
You must be signed in to change notification settings - Fork 0
/
sendprompts.py
148 lines (113 loc) · 3.57 KB
/
sendprompts.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import requests
from sqlalchemy import create_engine, text, Connection
from tqdm import tqdm
import argparse
import json
# Path of a local text file holding the OpenAI API key; read once in __main__.
API_SECRET_FN = "secretkey.txt"
def get_cmdargs() -> dict:
    """Parse the script's command-line options.

    Returns:
        dict: mapping of option names to their parsed values
    """
    parser = argparse.ArgumentParser(
        description = "Send prompt to OpenAI and print response"
    )
    # (flags, keyword options) for every supported argument.
    option_specs = [
        (("-u", "--userprompt"),
         {"help": "Users question to the Augustus", "required": True}),
        (("-v", "--verbose"),
         {"default": False, "action": "store_true"}),
        (("-p", "--dbport"),
         {"help": "Database port", "default": "5432"}),
    ]
    for flags, options in option_specs:
        parser.add_argument(*flags, **options)
    return vars(parser.parse_args())
def get_embeddings(inp_text: str) -> list:
    """Send natural language text to the OpenAI Embeddings API.

    Args:
        inp_text (str): the text to embed

    Returns:
        list: embedding vector (list of floats) for the input

    Raises:
        requests.HTTPError: if the API responds with an error status
    """
    header = {
        "Content-Type": "application/json",
        # APIKEY is a module-level global assigned in the __main__ block.
        "Authorization": f"Bearer {APIKEY}"
    }
    data = {
        "input": inp_text,
        "model": "text-embedding-ada-002"
    }
    result = requests.post(
        url = "https://api.openai.com/v1/embeddings",
        headers = header,
        json = data,
        # requests has no default timeout; without one a stalled
        # connection would hang the script forever.
        timeout = 30
    )
    # Fail loudly on an API error instead of raising an opaque KeyError
    # when the error payload lacks a "data" field.
    result.raise_for_status()
    return result.json()["data"][0]["embedding"]
def send_prompt(userrequest: str, topn: int, con: Connection, verbose: bool) -> str:
    """Query vector store and send prompt to OpenAI completions endpoint.

    Fetches the ``topn`` nearest stored sentences to the question and asks
    gpt-3.5-turbo to answer using only that retrieved context.

    Args:
        userrequest (str): question to ask
        topn (int): number of top results to return from db
        con (Connection): connection to the database holding the embeddings
        verbose (bool): when True, prefix the answer with the question and
            the retrieved context list

    Returns:
        str: answer to the question

    Raises:
        requests.HTTPError: if the completions endpoint returns an error status
    """
    CONTEXTPROMPT = """
You will respond with the voice of roman emperor Marcus Aurelius.
"""
    PROMPTTEMPLATE1 = """
Give response to the following question:
"""
    PROMPTTEMPLATE2 = """
Use only the content of the following list to give answer.
Stay within the confines of the information provided by the sentences
in this following list:
"""
    qemb = get_embeddings(userrequest)
    # pgvector "<=>" is cosine distance: smallest distance first = most similar.
    res = con.execute(
        text("select rawtext from embed order by embeddings <=> :qemb limit :topn;"),
        parameters = {"qemb": str(qemb).replace(" ", ""), "topn": topn}
    )
    res_list = ["- " + r[0] for r in res]
    user_str = "\n".join(res_list)
    prompt = CONTEXTPROMPT + PROMPTTEMPLATE1 + userrequest + PROMPTTEMPLATE2 + user_str
    header = {
        "Content-Type": "application/json",
        # APIKEY is a module-level global assigned in the __main__ block.
        "Authorization": f"Bearer {APIKEY}"
    }
    data = {
        "model": "gpt-3.5-turbo",
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ]
    }
    response = requests.post(
        url = "https://api.openai.com/v1/chat/completions",
        headers = header,
        json = data,
        # requests has no default timeout; without one a stalled
        # connection would hang the script forever.
        timeout = 60
    )
    # Surface API errors instead of raising an opaque KeyError below.
    response.raise_for_status()
    outp = response.json()["choices"][0]["message"]["content"]
    if verbose:
        return f"question : {userrequest}\ndblist : {res_list}\n" + outp
    return outp
if __name__ == "__main__":
    args = get_cmdargs()
    # APIKEY stays module-global: the request helpers read it when
    # building their Authorization headers.
    with open(API_SECRET_FN, "r") as keyfile:
        APIKEY = keyfile.read().strip()
    db_url = f"postgresql+psycopg2://root:password@localhost:{args['dbport']}/postgres"
    engine = create_engine(db_url)
    with engine.connect() as conn:
        print(send_prompt(args["userprompt"], 10, conn, args["verbose"]))