Enable completion template customization, including suffix
krassowski committed Jan 25, 2024
1 parent 31c572d commit 7eb8518
Showing 2 changed files with 58 additions and 48 deletions.
63 changes: 56 additions & 7 deletions packages/jupyter-ai-magics/jupyter_ai_magics/providers.py
@@ -48,7 +48,7 @@
from pydantic.main import ModelMetaclass


SYSTEM_PROMPT = """
CHAT_SYSTEM_PROMPT = """
You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
You are not a language model, but rather an application built on a foundation model from {provider_name} called {local_model_id}.
You are talkative and you provide lots of specific details from the foundation model's context.
@@ -59,12 +59,38 @@
The following is a friendly conversation between you and a human.
""".strip()

DEFAULT_TEMPLATE = """Current conversation:
CHAT_DEFAULT_TEMPLATE = """Current conversation:
{history}
Human: {input}
AI:"""


COMPLETION_SYSTEM_PROMPT = """
You are an application built to provide helpful code completion suggestions.
You should only produce code. Keep comments to minimum, use the
programming language comment syntax. Produce clean code.
The code is written in JupyterLab, a data analysis and code development
environment which can execute code extended with additional syntax for
interactive features, such as magics.
""".strip()

# only add the suffix bit if present to save input tokens/computation time
COMPLETION_DEFAULT_TEMPLATE = """
The document is called `{{filename}}` and written in {{language}}.
{% if suffix %}
The code after the completion request is:
```
{{suffix}}
```
{% endif %}
Complete the following code:
```
{{prefix}}"""

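As the in-code comment notes, the suffix section is wrapped in a Jinja2 conditional so that an empty suffix costs no input tokens. A minimal sketch of how that renders (assumes the `jinja2` package, which LangChain uses for `template_format="jinja2"`; the sample prefix/suffix values are illustrative):

```python
# Minimal sketch: render COMPLETION_DEFAULT_TEMPLATE with and without a suffix.
# The constant is added by this commit; the sample values are illustrative.
from jinja2 import Template

from jupyter_ai_magics.providers import COMPLETION_DEFAULT_TEMPLATE

template = Template(COMPLETION_DEFAULT_TEMPLATE)
common = {
    "filename": "analysis.ipynb",
    "language": "python",
    "prefix": "def mean(values):\n    ",
}

# With a suffix, the "code after the completion request" block is rendered.
print(template.render(suffix="print(mean([1, 2, 3]))", **common))

# With an empty suffix, the whole {% if suffix %} block is skipped,
# saving input tokens and computation time.
print(template.render(suffix="", **common))
```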

class EnvAuthStrategy(BaseModel):
"""Require one auth token via an environment variable."""

@@ -297,21 +323,44 @@ def get_chat_prompt_template(self) -> PromptTemplate:
if self.is_chat_provider:
return ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT).format(
provider_name=name, local_model_id=self.model_id
),
SystemMessagePromptTemplate.from_template(
CHAT_SYSTEM_PROMPT
).format(provider_name=name, local_model_id=self.model_id),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
else:
return PromptTemplate(
input_variables=["history", "input"],
template=SYSTEM_PROMPT.format(
template=CHAT_SYSTEM_PROMPT.format(
provider_name=name, local_model_id=self.model_id
)
+ "\n\n"
+ DEFAULT_TEMPLATE,
+ CHAT_DEFAULT_TEMPLATE,
)

def get_inline_completion_prompt_template(self) -> PromptTemplate:
"""
Produce a prompt template optimised for code or text completion.
The template should take variables: prefix, suffix, language, filename.
"""
if self.is_chat_provider:
return ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(COMPLETION_SYSTEM_PROMPT),
HumanMessagePromptTemplate.from_template(
COMPLETION_DEFAULT_TEMPLATE, template_format="jinja2"
),
]
)
else:
return PromptTemplate(
input_variables=["prefix", "suffix", "language", "filename"],
template=COMPLETION_SYSTEM_PROMPT
+ "\n\n"
+ COMPLETION_DEFAULT_TEMPLATE,
template_format="jinja2",
)

@property
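Moving the template onto the provider is what enables the customization in the commit title: a provider subclass can now override `get_inline_completion_prompt_template()` instead of being locked to the handler's hard-coded prompt. A hypothetical sketch (the class name and the fill-in-the-middle template are illustrative, not part of this commit; a real provider would also mix in a LangChain LLM class, as the built-in providers do):

```python
from langchain.prompts import PromptTemplate

from jupyter_ai_magics.providers import BaseProvider


class MyCompletionProvider(BaseProvider):
    """Hypothetical provider that swaps in a fill-in-the-middle style prompt."""

    id = "my-provider"
    name = "My Provider"
    models = ["my-model"]
    model_id_key = "model"

    def get_inline_completion_prompt_template(self) -> PromptTemplate:
        # The handler formats this with the same prefix/suffix/language/filename
        # variables, so only the template text changes.
        return PromptTemplate(
            input_variables=["prefix", "suffix", "language", "filename"],
            template="<PRE> {{prefix}} <SUF>{{suffix}} <MID>",
            template_format="jinja2",
        )
```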
43 changes: 2 additions & 41 deletions packages/jupyter-ai/jupyter_ai/completions/handlers/default.py
@@ -18,32 +18,6 @@
)
from .base import BaseInlineCompletionHandler

SYSTEM_PROMPT = """
You are an application built to provide helpful code completion suggestions.
You should only produce code. Keep comments to minimum, use the
programming language comment syntax. Produce clean code.
The code is written in JupyterLab, a data analysis and code development
environment which can execute code extended with additional syntax for
interactive features, such as magics.
""".strip()

AFTER_TEMPLATE = """
The code after the completion request is:
```
{suffix}
```
""".strip()

DEFAULT_TEMPLATE = """
The document is called `{filename}` and written in {language}.
{after}
Complete the following code:
```
{prefix}"""


class DefaultInlineCompletionHandler(BaseInlineCompletionHandler):
llm_chain: Runnable
@@ -57,18 +31,7 @@ def create_llm_chain(
model_parameters = self.get_model_parameters(provider, provider_params)
llm = provider(**provider_params, **model_parameters)

if llm.is_chat_provider:
prompt_template = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT),
HumanMessagePromptTemplate.from_template(DEFAULT_TEMPLATE),
]
)
else:
prompt_template = PromptTemplate(
input_variables=["prefix", "suffix", "language", "filename"],
template=SYSTEM_PROMPT + "\n\n" + DEFAULT_TEMPLATE,
)
prompt_template = llm.get_inline_completion_prompt_template()

self.llm = llm
self.llm_chain = prompt_template | llm | StrOutputParser()
@@ -151,13 +114,11 @@ def _token_from_request(self, request: InlineCompletionRequest, suggestion: int)

def _template_inputs_from_request(self, request: InlineCompletionRequest) -> Dict:
suffix = request.suffix.strip()
# only add the suffix template if the suffix is there to save input tokens/computation time
after = AFTER_TEMPLATE.format(suffix=suffix) if suffix else ""
filename = request.path.split("/")[-1] if request.path else "untitled"

return {
"prefix": request.prefix,
"after": after,
"suffix": suffix,
"language": request.language,
"filename": filename,
"stop": ["\n```"],

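With the handler now delegating to the provider, the completion chain is simply the provider's template piped into the model. An end-to-end sketch of that wiring (illustrative only: it uses LangChain's FakeListLLM as a stand-in, whereas the real handler obtains the model from the configured provider, and it builds the non-chat variant of the template by hand):

```python
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain_community.llms.fake import FakeListLLM

from jupyter_ai_magics.providers import (
    COMPLETION_DEFAULT_TEMPLATE,
    COMPLETION_SYSTEM_PROMPT,
)

# What get_inline_completion_prompt_template() returns for a non-chat provider.
prompt_template = PromptTemplate(
    input_variables=["prefix", "suffix", "language", "filename"],
    template=COMPLETION_SYSTEM_PROMPT + "\n\n" + COMPLETION_DEFAULT_TEMPLATE,
    template_format="jinja2",
)

# Stand-in model so the chain can be exercised without a real provider.
llm = FakeListLLM(responses=["    return a + b\n```"])
llm_chain = prompt_template | llm | StrOutputParser()

# _template_inputs_from_request() now passes the raw suffix through (plus a
# "\n```" stop sequence, omitted here); the template decides whether to render it.
completion = llm_chain.invoke(
    {
        "prefix": "def add(a, b):\n",
        "suffix": "",
        "language": "python",
        "filename": "untitled",
    }
)
print(completion)
```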