Improve LLM use case and fix dashboard bugs.
muazhari committed Aug 7, 2024
1 parent 174edaf commit 68ff457
Showing 18 changed files with 380 additions and 1,562 deletions.
2 changes: 1 addition & 1 deletion .env.example
@@ -1,4 +1,4 @@
openai_api_key=
OPENAI_API_KEY=
LANGCHAIN_TRACING_V2=true
LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
LANGCHAIN_PROJECT=autocode
4 changes: 2 additions & 2 deletions README.md
@@ -25,15 +25,15 @@ companies.
1. Install the requirements

```bash
pip install autocode-py
pip install -U autocode-py
```

2. Prepare software to be processed as in the [`./example/client`](https://github.com/muazhari/autocode/tree/main/example/client) folder.
3. Prepare deployment as in the [`./example/client/docker-compose.yml`](https://github.com/muazhari/autocode/blob/main/example/client/docker-compose.yml) file.
4. Prepare controller as in the [`./example/controller.ipynb`](https://github.com/muazhari/autocode/blob/main/example/controller.ipynb) file.
5. Instantiate `optimization` and execute `optimization.deploy()` in controller.
6. Open dashboard in `http://localhost:{dashboard_port}/` to see the process in real-time.
7. Wait until all client are ready (need to wait for long time because the libraries need to be re-download for each client & refresh the dashboard manually because of bug).
7. Wait until all clients are ready (this can take a long time because the libraries need to be re-downloaded for each client).
8. Execute `optimization.run()` in controller.
9. Wait until the run is finished.
10. Analyze and decide the best values.
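A minimal controller sketch for steps 5 to 8 above. The class name `Optimization`, its import path, and its constructor arguments are assumptions here (the real wiring lives in [`./example/controller.ipynb`](https://github.com/muazhari/autocode/blob/main/example/controller.ipynb)); only `deploy()`, `run()`, and the default `dashboard_port` of 10001 come from the README steps and `autocode/setting.py` in this commit.

```python
# Hypothetical controller sketch: Optimization, its import path, and its
# constructor arguments are assumptions; deploy()/run() and the default
# dashboard port 10001 are taken from the README steps and autocode/setting.py.
from autocode import Optimization  # assumed import location

optimization = Optimization()  # real configuration is shown in example/controller.ipynb

optimization.deploy()  # step 5: start the clients and the dashboard
print("Dashboard: http://localhost:10001/")  # step 6: dashboard_port defaults to 10001

optimization.run()  # step 8: start the optimization once all clients are ready
```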
36 changes: 16 additions & 20 deletions autocode/dashboard.py
@@ -55,16 +55,19 @@
client: OptimizationClient = dill.loads(client_cache.value)
variables.update(client.variables)
clients[client.id] = client
client_df_list.append(client.model_dump(mode="json"))
client_json: Dict[str, Any] = client.model_dump(mode="json")
client_json["port"] = str(client.port)
client_df_list.append(client_json)

with client_df_placeholder:
client_df: pd.DataFrame = pd.DataFrame(client_df_list)
client_df = client_df.astype(dtype=str)
client_df: pd.DataFrame = pd.DataFrame(client_df_list)

with client_df_placeholder.container():
st.dataframe(client_df, height=500)

if len(objective_caches) > 0:
break

session.close()
time.sleep(0.01)

if len(objective_caches) == 0 and len(client_caches) == 0:
@@ -91,12 +94,7 @@
else:
st.error("Preparation data cache is not valid.")

plot_0_placeholder = st.empty()
plot_1_placeholder = st.empty()
st.subheader("Objective Space")
plot_f_df_placeholder = st.empty()
st.subheader("Solution Space")
plot_x_df_placeholder = st.empty()
plot_placeholder = st.empty()

while True:
try:
@@ -137,12 +135,6 @@
decision_index=decision_index
)

with plot_0_placeholder:
st.pyplot(plots[0].fig)

with plot_1_placeholder:
st.pyplot(plots[1].fig)

list_dict_x: List[List[Dict[str, Any]]] = []
list_dict_f: List[Dict[str, Any]] = []
for index, (x, f) in enumerate(zip(result.X, result.F)):
@@ -178,13 +170,17 @@
dict_f[f"f{index + 1}"] = f_value
list_dict_f.append(dict_f)

with plot_f_df_placeholder:
f_df: pd.DataFrame = pd.DataFrame(list_dict_f)
st.dataframe(f_df, height=500)
f_df: pd.DataFrame = pd.DataFrame(list_dict_f)

with plot_x_df_placeholder:
with plot_placeholder.container():
st.subheader("Objective Space")
st.pyplot(plots[0].fig)
st.pyplot(plots[1].fig)
st.subheader("Solution Space")
st.dataframe(f_df, height=500)
st.json(list_dict_x, expanded=False)

st.session_state["old_result_caches"].add(cache)

session.close()
time.sleep(0.01)
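The change above replaces the separate `st.empty()` placeholders with a single placeholder whose contents are rebuilt through `.container()` on every refresh pass, which appears to be the fix for the manual-refresh issue mentioned in the README change. A standalone sketch of that Streamlit pattern; the data and sleep interval are illustrative only, not taken from the repo:

```python
# Minimal sketch of the placeholder-refresh pattern used by the dashboard:
# one st.empty() slot is overwritten atomically on every loop iteration.
import time

import pandas as pd
import streamlit as st

placeholder = st.empty()  # single slot that is replaced on each refresh

while True:
    # Illustrative data; the real dashboard builds this from the result cache.
    f_df = pd.DataFrame([{"f1": 0.42, "f2": 1.37}, {"f1": 0.51, "f2": 1.02}])

    with placeholder.container():  # everything rendered here replaces the previous contents
        st.subheader("Objective Space")
        st.dataframe(f_df, height=500)

    time.sleep(0.01)
```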
Binary file modified autocode/database.db
Binary file not shown.
32 changes: 13 additions & 19 deletions autocode/model.py
@@ -22,42 +22,35 @@ class BaseModel(PydanticBaseModelV2):

class CodeScoring(BaseModelV1):
"""
Score code in based on the following statements:
Error Potentiality - this code is potentially error-prone;
Readability - this code is easy to read;
Understandability - the semantic meaning of this code is clear;
Complexity - this code is complex;
Modularity - this code should be broken into smaller pieces;
Overall maintainability - overall, this code is maintainable.
The score scale from 1 (strongly agree) to 100 (strongly disagree).
You must score in precision, i.e. 14.3, 47.456, 75.45, 58.58495, 3.141598, etc.
Score the code.
"""
error_potentiality: float = FieldV1(description="Error potential score.")
analysis: List[str] = FieldV1(description="Step-by-step analysis before scoring the code.")
readability: float = FieldV1(description="Readability score.")
understandability: float = FieldV1(description="Understandability score.")
complexity: float = FieldV1(description="Complexity score.")
modularity: float = FieldV1(description="Modularity score.")
error_potentiality: float = FieldV1(description="Error potential score.")
overall_maintainability: float = FieldV1(description="Overall maintainability score.")


class CodeVariation(BaseModelV1):
"""
Code variation is a code snippet that is a variation of the original code.
Propose code variation.
"""
variation: Optional[str] = FieldV1(description="Code variation.", default=None)
analysis: List[str] = FieldV1(description="Step-by-step analysis before proposing the code variation.")
variation: str = FieldV1(description="Proposed code variation.")


class ScoringState(TypedDict):
code: str
analysis: str
score: List[CodeScoring]
programming_language: str
existing_code: str
score: CodeScoring


class VariationState(TypedDict):
code: str
analysis: str
variation: List[CodeVariation]
new_function_name: str
programming_language: str
existing_code: str
variations: List[CodeVariation]


class OptimizationVariable(BaseModel):
@@ -141,6 +134,7 @@ class OptimizationClient(BaseModel):


class OptimizationPrepareRequest(BaseModel):
language: str
variables: Dict[str, OptimizationBinary | OptimizationChoice | OptimizationInteger | OptimizationReal]
host: Optional[str] = Field(default=None)
port: int
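`CodeScoring` and `CodeVariation` above are Pydantic v1 models whose docstrings and field descriptions serve as the instruction and schema for structured LLM output. A hedged sketch of how `CodeScoring` might be invoked through LangChain structured output; the prompt, model name, and chain wiring are assumptions rather than something shown in this diff, and it presumes a LangChain version whose `with_structured_output` accepts Pydantic v1 classes:

```python
# Hedged sketch: prompt, model choice, and wiring are assumptions; only the
# CodeScoring schema itself comes from autocode/model.py.
from langchain_openai import ChatOpenAI

from autocode.model import CodeScoring

llm = ChatOpenAI(model="gpt-4o-mini")             # assumed model choice
scorer = llm.with_structured_output(CodeScoring)  # docstring + field descriptions become the schema

result = scorer.invoke(
    "Score the code.\n\n"
    "Programming language: python\n\n"
    "def add(a, b):\n"
    "    return a + b\n"
)
print(result.analysis)                 # step-by-step analysis produced before scoring
print(result.overall_maintainability)  # float score
```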
2 changes: 1 addition & 1 deletion autocode/setting.py
@@ -12,4 +12,4 @@ class ApplicationSetting(BaseSettings):
server_host: str = Field(default="0.0.0.0")
server_port: int = Field(default=10000)
dashboard_port: int = Field(default=10001)
openai_api_key: str = Field(default="")
OPENAI_API_KEY: str = Field(default="")
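Renaming the field to `OPENAI_API_KEY` keeps it aligned with the `.env.example` key changed in this commit; since `ApplicationSetting` extends `BaseSettings`, the value is read from the environment when the settings object is created. A small usage sketch, assuming the fields outside this hunk also have defaults:

```python
# Usage sketch; assumes the ApplicationSetting fields not shown in this hunk
# also have defaults, so the class can be instantiated without arguments.
import os

from autocode.setting import ApplicationSetting

os.environ["OPENAI_API_KEY"] = "sk-placeholder"  # normally supplied through the .env file
setting = ApplicationSetting()                   # BaseSettings reads env vars at instantiation
print(setting.dashboard_port)                    # 10001 by default
print(bool(setting.OPENAI_API_KEY))              # True
```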