Merge pull request #142 from fractalego/removing-dead-code
Removing dead code
fractalego authored Dec 15, 2024
2 parents 3c9772c + cfd4844 commit 16d7ee0
Showing 11 changed files with 31 additions and 42 deletions.
1 change: 0 additions & 1 deletion setup.py
@@ -22,7 +22,6 @@
         "wafl.connectors.remote",
         "wafl.data_objects",
         "wafl.events",
-        "wafl.extractors",
         "wafl.handlers",
         "wafl.inference",
         "wafl.interface",
6 changes: 3 additions & 3 deletions tests/test_connection.py
@@ -12,7 +12,7 @@
 class TestConnection(TestCase):
     def test__connection_to_generative_model_can_generate_text(self):
         config = Configuration.load_local_config()
-        connector = RemoteLLMConnector(config.get_value("llm_model"))
+        connector = RemoteLLMConnector(config)
         prediction = asyncio.run(
             connector.predict(
                 PromptCreator.create_from_one_instruction(
@@ -25,7 +25,7 @@ def test__connection_to_generative_model_can_generate_text(self):

     def test__connection_to_generative_model_can_generate_text_within_tags(self):
         config = Configuration.load_local_config()
-        connector = RemoteLLMConnector(config.get_value("llm_model"))
+        connector = RemoteLLMConnector(config)
         connector._num_prediction_tokens = 200
         text = 'Generate a full paragraph based on this chapter title " The First Contact". The theme of the paragraph is space opera. Include the characters "Alberto" and "Maria". Write at least three sentences.'
         prompt = f"""
@@ -43,7 +43,7 @@ def test__connection_to_generative_model_can_generate_text_within_tags(self):

     def test__connection_to_generative_model_can_generate_a_python_list(self):
         config = Configuration.load_local_config()
-        connector = RemoteLLMConnector(config.get_value("llm_model"))
+        connector = RemoteLLMConnector(config)
         connector._num_prediction_tokens = 200
         prompt = "Generate a Python list of 4 chapters names for a space opera book. The output needs to be a python list of strings: "
         prediction = asyncio.run(
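
All three tests above make the same call-site change: RemoteLLMConnector now receives the whole Configuration object rather than a single looked-up value. A minimal sketch of the pattern, assuming the import path mirrors the remote connector module shown further down in this diff:

# Sketch only: the RemoteLLMConnector import path is an assumption based on
# the wafl.connectors.remote package layout; only the constructor change is
# taken from the diff above.
from wafl.config import Configuration
from wafl.connectors.remote.remote_llm_connector import RemoteLLMConnector

config = Configuration.load_local_config()
# before: connector = RemoteLLMConnector(config.get_value("llm_model"))
connector = RemoteLLMConnector(config)  # after: the connector reads what it needs from config
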
2 changes: 1 addition & 1 deletion tests/test_entailer.py
@@ -12,7 +12,7 @@
 class TestConnection(TestCase):
     def test__entailer_connector(self):
         config = Configuration.load_local_config()
-        connector = RemoteEntailerConnector(config.get_value("entailer_model"))
+        connector = RemoteEntailerConnector(config)
         prediction = asyncio.run(
             connector.predict(
                 "The first contact is a romance novel set in the middle ages.",
4 changes: 4 additions & 0 deletions todo.txt
@@ -1,3 +1,7 @@
 * user prior text and response from failed substitutions between [] instead of just iteration number (line 89, dialogue_answerer.py)
 * remove dead code
 * re-fine-tune phi to get better performance
+
+* delete rules and memory from discourse_answerer
+
+* This is wrong - from wafl_ll
11 changes: 9 additions & 2 deletions wafl/answerer/dialogue_answerer.py
@@ -71,7 +71,9 @@ async def answer(self, query_text: str) -> Answer:
                 rules_text=rules_text,
                 dialogue=conversation,
             )
-            await self._interface.add_fact(f"The bot predicts: {original_answer_text}")
+            await self._interface.add_fact(
+                f"The bot predicts: {original_answer_text}"
+            )
             answer_text, memories = await self._apply_substitutions(
                 original_answer_text
             )
@@ -86,7 +88,12 @@ async def answer(self, query_text: str) -> Answer:
         except RuntimeError as e:
             if self._logger:
                 self._logger.write(f"Error in generating answer: {e}")
-            conversation.add_utterance(Utterance(speaker="bot", text=f"[Trying again for the {num_attempts + 2} time.]\n"))
+            conversation.add_utterance(
+                Utterance(
+                    speaker="bot",
+                    text=f"[when using the answer {original_answer_text} the system says {e}]\n",
+                )
+            )

         if not is_finished:
             final_answer_text += "I was unable to generate a full answer. Please see the logs for more information."
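
The second hunk replaces the bare retry counter with feedback: the failed answer and the exception text are pushed back into the conversation, so the next generation attempt is conditioned on what went wrong. A self-contained sketch of that retry-with-feedback shape, using stand-in names rather than the real wafl internals:

# Illustrative sketch only; Conversation, Utterance and the answer generator
# below are stand-ins, not the actual classes in dialogue_answerer.py.
import asyncio


class Utterance:
    def __init__(self, speaker: str, text: str):
        self.speaker = speaker
        self.text = text


class Conversation:
    def __init__(self):
        self.utterances = []

    def add_utterance(self, utterance: Utterance) -> None:
        self.utterances.append(utterance)


async def generate_answer(conversation: Conversation) -> str:
    # Stand-in for the model call; it always fails here to show the feedback path.
    raise RuntimeError("substitution failed")


async def answer_with_feedback(conversation: Conversation, max_attempts: int = 3) -> str:
    original_answer_text = ""
    for _ in range(max_attempts):
        try:
            original_answer_text = await generate_answer(conversation)
            return original_answer_text
        except RuntimeError as e:
            # Push the failed answer and the error text back into the dialogue,
            # instead of only noting the attempt number.
            conversation.add_utterance(
                Utterance(
                    speaker="bot",
                    text=f"[when using the answer {original_answer_text} the system says {e}]\n",
                )
            )
    return "I was unable to generate a full answer. Please see the logs for more information."


if __name__ == "__main__":
    print(asyncio.run(answer_with_feedback(Conversation())))
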
8 changes: 6 additions & 2 deletions wafl/config.py
@@ -2,7 +2,9 @@
 import os
 import shutil

-from wafl.connectors.remote.remote_configuration_connector import RemoteConfigurationConnector
+from wafl.connectors.remote.remote_configuration_connector import (
+    RemoteConfigurationConnector,
+)

 _path = os.path.dirname(__file__)

@@ -23,7 +25,9 @@ def __init__(self, filename):
         with open(filename) as file:
             self._data = json.load(file)

-        self._remote_config = RemoteConfigurationConnector(self._data["backend"]["host"], self._data["backend"]["port"])
+        self._remote_config = RemoteConfigurationConnector(
+            self._data["backend"]["host"], self._data["backend"]["port"]
+        )

     def get_value(self, key):
         if key in self._data:
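
The constructor now also builds a RemoteConfigurationConnector from the backend section of the JSON file. A minimal shape for that section, consistent with the two keys read here (the concrete values are placeholders):

# Only _data["backend"]["host"] and _data["backend"]["port"] are read in the
# hunk above; the values below are placeholders, and any other keys the real
# config file carries are not shown.
example_config = {
    "backend": {
        "host": "localhost",
        "port": 8080,
    },
}
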
8 changes: 5 additions & 3 deletions wafl/connectors/remote/remote_configuration_connector.py
@@ -5,6 +5,7 @@
 from typing import Dict
 from wafl.variables import get_variables

+
 class RemoteConfigurationConnector:
     _max_tries = 3

@@ -19,7 +20,9 @@ def __init__(self, host: str, port: int):
         if (not loop or (loop and not loop.is_running())) and not asyncio.run(
             self.check_connection()
         ):
-            raise RuntimeError("Cannot connect a running Configuration handler. Is WAFL-LLM running?")
+            raise RuntimeError(
+                "Cannot connect a running Configuration handler. Is WAFL-LLM running?"
+            )

     async def predict(self) -> Dict[str, str]:
         payload = {"version": get_variables()["version"]}
@@ -45,7 +48,6 @@ async def predict(self) -> Dict[str, str]:

         return {}

-
     async def check_connection(self) -> bool:
         try:
             async with aiohttp.ClientSession(
@@ -56,4 +58,4 @@
                 return response.status == 200

         except Exception:
-            return False
+            return False
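
A short usage sketch based only on the methods visible in this diff; the host and port values are placeholders, and the constructor itself raises the RuntimeError shown above when the backend cannot be reached:

# Usage sketch; only RemoteConfigurationConnector(host, port) and
# check_connection() are taken from the diff above, the values are placeholders.
import asyncio

from wafl.connectors.remote.remote_configuration_connector import (
    RemoteConfigurationConnector,
)

connector = RemoteConfigurationConnector("localhost", 8080)
# True when the configuration endpoint answers with HTTP 200, False otherwise.
print(asyncio.run(connector.check_connection()))
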
5 changes: 1 addition & 4 deletions wafl/events/conversation_events.py
@@ -80,10 +80,7 @@ async def _process_query(self, text: str):
         ):
             await self._interface.output("I don't know what to reply")

-        if (
-            not text_is_question
-            and not self._interface.get_utterances_list()
-        ):
+        if not text_is_question and not self._interface.get_utterances_list():
             await self._interface.output("I don't know what to reply")

         if (
1 change: 0 additions & 1 deletion wafl/extractors/__init__.py

This file was deleted.

23 changes: 0 additions & 23 deletions wafl/extractors/utils.py

This file was deleted.

4 changes: 2 additions & 2 deletions wafl/variables.py
@@ -1,9 +1,9 @@
 def get_variables():
     return {
-        "version": "0.1.2",
+        "version": "0.1.3",
     }


 def is_supported(wafl_llm_version):
-    supported_versions = ["0.1.0"]
+    supported_versions = ["0.1.1"]
     return wafl_llm_version in supported_versions
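
The version bump and the new supported-versions list act as a compatibility gate between this client and the WAFL-LLM backend. A small sketch of how the two functions might be used together (how the backend version is obtained in practice is an assumption):

# Sketch of the compatibility check; only get_variables() and is_supported()
# come from wafl/variables.py, the rest is illustrative.
from wafl.variables import get_variables, is_supported

print(get_variables()["version"])     # "0.1.3" after this commit

wafl_llm_version = "0.1.1"            # e.g. the version reported by the WAFL-LLM server
if not is_supported(wafl_llm_version):
    raise RuntimeError(f"WAFL-LLM version {wafl_llm_version} is not supported")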
