Test for llm models --options, refs #169
simonw committed Aug 20, 2023
1 parent 0537622 commit 36f8ffc
Showing 2 changed files with 41 additions and 1 deletion.
5 changes: 4 additions & 1 deletion llm/cli.py
@@ -599,10 +599,13 @@ def models_list(options):
             for name, field in model_with_aliases.model.Options.schema()[
                 "properties"
             ].items():
+                any_of = field.get("anyOf")
+                if any_of is None:
+                    any_of = [{"type": field["type"]}]
                 types = ", ".join(
                     [
                         _type_lookup.get(item["type"], item["type"])
-                        for item in field["anyOf"]
+                        for item in any_of
                         if item["type"] != "null"
                     ]
                 )
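
The cli.py change handles option fields whose JSON schema has no "anyOf" key. Under Pydantic 2-style schema generation, an "anyOf" union only appears for fields declared as Optional or a Union; a field with a plain annotation gets a single "type" key instead, so the previous field["anyOf"] lookup would raise KeyError for such options. A minimal illustrative sketch of the two schema shapes (not part of this commit; assumes Pydantic 2, which renders Optional fields differently from Pydantic 1):

from typing import Optional

from pydantic import BaseModel


class Options(BaseModel):
    temperature: Optional[float] = None  # union with None -> schema uses "anyOf"
    max_tokens: int = 100  # plain annotation -> schema uses a single "type" key


properties = Options.schema()["properties"]  # .schema() is a deprecated alias in Pydantic 2
print(properties["temperature"])
# {'anyOf': [{'type': 'number'}, {'type': 'null'}], 'default': None, 'title': 'Temperature'}
print(properties["max_tokens"])
# {'default': 100, 'title': 'Max Tokens', 'type': 'integer'}

The new any_of = [{"type": field["type"]}] fallback normalizes the second shape so the existing type-listing comprehension works for both.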
37 changes: 37 additions & 0 deletions tests/test_llm.py
@@ -298,3 +298,40 @@ def test_openai_localai_configuration(mocked_localai, user_path):
        "messages": [{"role": "user", "content": "three names\nfor a pet pelican"}],
        "stream": False,
    }


EXPECTED_OPTIONS = """
OpenAI Chat: gpt-3.5-turbo (aliases: 3.5, chatgpt)
  temperature: float
    What sampling temperature to use, between 0 and 2. Higher values like
    0.8 will make the output more random, while lower values like 0.2 will
    make it more focused and deterministic.
  max_tokens: int
    Maximum number of tokens to generate.
  top_p: float
    An alternative to sampling with temperature, called nucleus sampling,
    where the model considers the results of the tokens with top_p
    probability mass. So 0.1 means only the tokens comprising the top 10%
    probability mass are considered. Recommended to use top_p or
    temperature but not both.
  frequency_penalty: float
    Number between -2.0 and 2.0. Positive values penalize new tokens based
    on their existing frequency in the text so far, decreasing the model's
    likelihood to repeat the same line verbatim.
  presence_penalty: float
    Number between -2.0 and 2.0. Positive values penalize new tokens based
    on whether they appear in the text so far, increasing the model's
    likelihood to talk about new topics.
  stop: str
    A string where the API will stop generating further tokens.
  logit_bias: dict, str
    Modify the likelihood of specified tokens appearing in the completion.
    Pass a JSON string like '{"1712":-100, "892":-100, "1489":-100}'
"""


def test_llm_models_options(user_path):
    runner = CliRunner()
    result = runner.invoke(cli, ["models", "--options"], catch_exceptions=False)
    assert result.exit_code == 0
    assert EXPECTED_OPTIONS.strip() in result.output
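
The assertion uses a substring check (EXPECTED_OPTIONS.strip() in result.output) rather than comparing the full output, presumably so the test keeps passing when other registered models and their options also appear in the --options listing.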
