diff --git a/modules/chatgpt.py b/modules/chatgpt.py
index 814a01a..3b162ac 100644
--- a/modules/chatgpt.py
+++ b/modules/chatgpt.py
@@ -155,24 +155,27 @@ async def chatgpt_chat(self, message: ServiceMessage) -> Response:
             self.class_name,
             msg=f"sending chat prompt to chatgpt, engine {engine} ({engine.description})",
         )
-        chatcompletion = cast(
-            OpenAIObject,
-            openai.ChatCompletion.create(model=str(engine), messages=messages),
-        )
-        if chatcompletion.choices:
-            response = chatcompletion.choices[0].message.content
-
-            # sometimes the response starts with "Stampy says:" or responds or replies etc, which we don't want
-            response = re.sub(r"^[sS]tampy\ ?[a-zA-Z]{,15}:\s?", "", response)
-
-            self.log.info(self.class_name, response=response)
-
-            if response:
-                return Response(
-                    confidence=10,
-                    text=f"{im}{response}{im}",
-                    why="ChatGPT made me say it!",
-                )
+        try:
+            chatcompletion = cast(
+                OpenAIObject,
+                openai.ChatCompletion.create(model=str(engine), messages=messages),
+            )
+            if chatcompletion.choices:
+                response = chatcompletion.choices[0].message.content
+
+                # sometimes the response starts with "Stampy says:" or responds or replies etc, which we don't want
+                response = re.sub(r"^[sS]tampy\ ?[a-zA-Z]{,15}:\s?", "", response)
+
+                self.log.info(self.class_name, response=response)
+
+                if response:
+                    return Response(
+                        confidence=10,
+                        text=f"{im}{response}{im}",
+                        why="ChatGPT made me say it!",
+                    )
+        except openai.error.Timeout:
+            pass
         return Response()
 
     def __str__(self):
diff --git a/servicemodules/discord.py b/servicemodules/discord.py
index 7afb706..1979af5 100644
--- a/servicemodules/discord.py
+++ b/servicemodules/discord.py
@@ -291,7 +291,8 @@ async def on_message(
                         sent.append(await message.channel.send(chunk))
                 elif isinstance(top_response.text, Iterable):
                     for chunk in top_response.text:
-                        sent.append(await message.channel.send(chunk))
+                        if chunk:
+                            sent.append(await message.channel.send(chunk))
                 why_traceback.append("Responded with that response!")
                 for m in sent:
                     self.messages[str(m.id)] = {
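
Side note, not part of the diff: a minimal sketch of the timeout handling the first hunk relies on, assuming the pre-1.0 `openai` SDK (which raises `openai.error.Timeout`). The `request_timeout` keyword, the helper name, and the 30-second value below are illustrative assumptions, not something configured by this patch.

```python
import openai


def chat_once(model: str, messages: list[dict], timeout_s: float = 30.0) -> str | None:
    """Call ChatCompletion once; return None instead of raising on timeout."""
    try:
        completion = openai.ChatCompletion.create(
            model=model,
            messages=messages,
            request_timeout=timeout_s,  # assumed kwarg: seconds before openai.error.Timeout
        )
    except openai.error.Timeout:
        # Same fallback behaviour as the patch: swallow the timeout so the
        # caller can fall through to an empty Response() instead of crashing.
        return None
    if completion.choices:
        return completion.choices[0].message.content
    return None
```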