diff --git a/client/src/api/schema/schema.ts b/client/src/api/schema/schema.ts
index 779417815453..298667fd1551 100644
--- a/client/src/api/schema/schema.ts
+++ b/client/src/api/schema/schema.ts
@@ -21,6 +21,46 @@ export interface paths {
patch?: never;
trace?: never;
};
+ "/api/chat": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ put?: never;
+ /**
+ * Query
+ * @description We're off to ask the wizard
+ */
+ post: operations["query_api_chat_post"];
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
+ "/api/chat/{job_id}/feedback": {
+ parameters: {
+ query?: never;
+ header?: never;
+ path?: never;
+ cookie?: never;
+ };
+ get?: never;
+ /**
+ * Feedback
+ * @description Provide feedback on the chatbot response.
+ */
+ put: operations["feedback_api_chat__job_id__feedback_put"];
+ post?: never;
+ delete?: never;
+ options?: never;
+ head?: never;
+ patch?: never;
+ trace?: never;
+ };
"/api/configuration": {
parameters: {
query?: never;
@@ -6541,6 +6581,20 @@ export interface components {
*/
type: "change_dbkey";
};
+ /** ChatPayload */
+ ChatPayload: {
+ /**
+ * Context
+ * @description The context for the chatbot.
+ * @default
+ */
+ context: string | null;
+ /**
+ * Query
+ * @description The query to be sent to the chatbot.
+ */
+ query: string;
+ };
/** CheckForUpdatesResponse */
CheckForUpdatesResponse: {
/**
@@ -18658,6 +18712,98 @@ export interface operations {
};
};
};
+ query_api_chat_post: {
+ parameters: {
+ query: {
+ job_id: string | null;
+ };
+ header?: {
+ /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */
+ "run-as"?: string | null;
+ };
+ path?: never;
+ cookie?: never;
+ };
+ requestBody: {
+ content: {
+ "application/json": components["schemas"]["ChatPayload"];
+ };
+ };
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": string;
+ };
+ };
+ /** @description Request Error */
+ "4XX": {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MessageExceptionModel"];
+ };
+ };
+ /** @description Server Error */
+ "5XX": {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MessageExceptionModel"];
+ };
+ };
+ };
+ };
+ feedback_api_chat__job_id__feedback_put: {
+ parameters: {
+ query: {
+ feedback: number;
+ };
+ header?: {
+ /** @description The user ID that will be used to effectively make this API call. Only admins and designated users can make API calls on behalf of other users. */
+ "run-as"?: string | null;
+ };
+ path: {
+ job_id: string | null;
+ };
+ cookie?: never;
+ };
+ requestBody?: never;
+ responses: {
+ /** @description Successful Response */
+ 200: {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": number | null;
+ };
+ };
+ /** @description Request Error */
+ "4XX": {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MessageExceptionModel"];
+ };
+ };
+ /** @description Server Error */
+ "5XX": {
+ headers: {
+ [name: string]: unknown;
+ };
+ content: {
+ "application/json": components["schemas"]["MessageExceptionModel"];
+ };
+ };
+ };
+ };
index_api_configuration_get: {
parameters: {
query?: {
diff --git a/client/src/components/DatasetInformation/DatasetError.vue b/client/src/components/DatasetInformation/DatasetError.vue
index e2254fc71953..cbe9f3e40189 100644
--- a/client/src/components/DatasetInformation/DatasetError.vue
+++ b/client/src/components/DatasetInformation/DatasetError.vue
@@ -2,7 +2,7 @@
import { library } from "@fortawesome/fontawesome-svg-core";
import { faBug } from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/vue-fontawesome";
-import { BAlert, BButton } from "bootstrap-vue";
+import { BAlert, BButton, BCard } from "bootstrap-vue";
import { storeToRefs } from "pinia";
import { computed, onMounted, ref } from "vue";
@@ -16,6 +16,7 @@ import { errorMessageAsString } from "@/utils/simple-error";
import DatasetErrorDetails from "@/components/DatasetInformation/DatasetErrorDetails.vue";
import FormElement from "@/components/Form/FormElement.vue";
+import GalaxyWizard from "@/components/GalaxyWizard.vue";
library.add(faBug);
@@ -154,6 +155,21 @@ onMounted(async () => {
                     >.
+            <h4 class="mb-3 h-md">Possible Causes</h4>
+
+            <BCard class="mb-2">
+                <p>
+                    We can use AI to analyze the issue and suggest possible fixes. Please note that the diagnosis may
+                    not always be accurate.
+                </p>
+
+                <!-- Illustrative bindings; they assume the job's error text and id are in scope here. -->
+                <GalaxyWizard
+                    context="tool_error"
+                    :query="jobError"
+                    :job-id="jobId" />
+            </BCard>
+
             <h4 class="mb-3 h-md">Issue Report</h4>
-
diff --git a/client/src/components/GalaxyWizard.vue b/client/src/components/GalaxyWizard.vue
new file mode 100644
index 000000000000..e648a3cda701
--- /dev/null
+++ b/client/src/components/GalaxyWizard.vue
@@ -0,0 +1,146 @@
+<script setup lang="ts">
+// NOTE: the endpoint paths and payload shape below follow the /api/chat routes
+// and ChatPayload schema added in this change; prop names are this component's
+// contract with its callers, and paths assume Galaxy is served at the root.
+import axios from "axios";
+import { BAlert, BButton } from "bootstrap-vue";
+import { ref } from "vue";
+
+import { useMarkdown } from "@/composables/markdown";
+import { errorMessageAsString } from "@/utils/simple-error";
+
+interface Props {
+    query: string;
+    context?: string;
+    jobId?: string;
+}
+
+const props = withDefaults(defineProps<Props>(), { context: "" });
+
+const { renderMarkdown } = useMarkdown({ openLinksInNewPage: true, removeNewlinesAfterList: true });
+
+const queryResponse = ref("");
+const busy = ref(false);
+const errorMessage = ref("");
+const feedback = ref<number | null>(null);
+
+async function submitQuery() {
+    busy.value = true;
+    errorMessage.value = "";
+    try {
+        const payload = { query: props.query, context: props.context };
+        const { data } = await axios.post("/api/chat", payload, { params: { job_id: props.jobId } });
+        queryResponse.value = data;
+    } catch (e) {
+        errorMessage.value = errorMessageAsString(e);
+    } finally {
+        busy.value = false;
+    }
+}
+
+async function sendFeedback(value: number) {
+    feedback.value = value;
+    try {
+        await axios.put(`/api/chat/${props.jobId}/feedback`, undefined, { params: { feedback: value } });
+    } catch (e) {
+        errorMessage.value = errorMessageAsString(e);
+    }
+}
+</script>
+
+<template>
+    <div>
+        <BAlert v-if="errorMessage" variant="danger" show>
+            {{ errorMessage }}
+        </BAlert>
+        <BButton v-if="!queryResponse" :disabled="busy" variant="primary" @click="submitQuery">
+            Let our Help Wizard Figure it out!
+        </BButton>
+        <div v-else>
+            <div class="chat-response" v-html="renderMarkdown(queryResponse)" />
+            <div v-if="jobId" class="mt-2">
+                <span>Was this answer helpful?</span>
+                <BButton :pressed="feedback === 1" @click="sendFeedback(1)">Yes</BButton>
+                <BButton :pressed="feedback === 0" @click="sendFeedback(0)">No</BButton>
+                <span v-if="feedback === null">This feedback helps us improve our responses.</span>
+                <span v-else>Thank you for your feedback!</span>
+            </div>
+        </div>
+    </div>
+</template>
diff --git a/client/src/composables/markdown.ts b/client/src/composables/markdown.ts
index a6659f60d404..43340deb55d9 100644
--- a/client/src/composables/markdown.ts
+++ b/client/src/composables/markdown.ts
@@ -69,9 +69,75 @@ function addRuleHeadingIncreaseLevel(engine: MarkdownIt, increaseBy: number) {
};
}
+/**
+ * Add rules that strip trailing newlines from rendered list tokens (list items
+ * and ordered/bullet list delimiters), so lists are not padded with blank lines.
+ */
+function addRuleRemoveNewlinesAfterList(engine: MarkdownIt) {
+    const listRules = [
+        "list_item_open",
+        "list_item_close",
+        "ordered_list_open",
+        "ordered_list_close",
+        "bullet_list_open",
+        "bullet_list_close",
+    ] as const;
+
+    for (const rule of listRules) {
+        const defaultRender =
+            engine.renderer.rules[rule] ||
+            function (tokens, idx, options, _env, self) {
+                return self.renderToken(tokens, idx, options);
+            };
+
+        engine.renderer.rules[rule] = function (tokens, idx, options, env, self) {
+            return defaultRender(tokens, idx, options, env, self).replace(/\n+$/, "");
+        };
+    }
+}
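+
+// Example (sketch): chat responses often pad list items with blank lines; with
+// the removeNewlinesAfterList option enabled, the rendered HTML stays compact:
+//
+//   const { renderMarkdown } = useMarkdown({ removeNewlinesAfterList: true });
+//   renderMarkdown("* one\n\n* two");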
+
interface UseMarkdownOptions {
openLinksInNewPage?: boolean;
increaseHeadingLevelBy?: number;
+ removeNewlinesAfterList?: boolean;
}
type RawMarkdown = string;
@@ -89,6 +155,10 @@ export function useMarkdown(options: UseMarkdownOptions = {}) {
addRuleHeadingIncreaseLevel(mdEngine, options.increaseHeadingLevelBy);
}
+ if (options.removeNewlinesAfterList) {
+ addRuleRemoveNewlinesAfterList(mdEngine);
+ }
+
function renderMarkdown(markdown: RawMarkdown): HTMLString {
return mdEngine.render(markdown);
}
diff --git a/client/src/entry/analysis/router.js b/client/src/entry/analysis/router.js
index 862a0622cc5f..033b70136cb2 100644
--- a/client/src/entry/analysis/router.js
+++ b/client/src/entry/analysis/router.js
@@ -7,6 +7,7 @@ import DatasetAttributes from "components/DatasetInformation/DatasetAttributes";
import DatasetDetails from "components/DatasetInformation/DatasetDetails";
import DatasetError from "components/DatasetInformation/DatasetError";
import FormGeneric from "components/Form/FormGeneric";
+import GalaxyWizard from "components/GalaxyWizard";
import HelpTerm from "components/Help/HelpTerm";
import HistoryExportTasks from "components/History/Export/HistoryExport";
import HistoryPublished from "components/History/HistoryPublished";
@@ -486,6 +487,10 @@ export function getRouter(Galaxy) {
path: "tours",
component: TourList,
},
+ {
+ path: "wizard",
+ component: GalaxyWizard,
+ },
{
path: "tours/:tourId",
component: TourRunner,
diff --git a/doc/source/admin/galaxy_options.rst b/doc/source/admin/galaxy_options.rst
index cb2d4c979d2b..a2a077224750 100644
--- a/doc/source/admin/galaxy_options.rst
+++ b/doc/source/admin/galaxy_options.rst
@@ -5366,6 +5366,27 @@
:Type: int
+~~~~~~~~~~~~~~~~~~
+``openai_api_key``
+~~~~~~~~~~~~~~~~~~
+
+:Description:
+    API key for OpenAI (https://openai.com/) to enable the wizard (and
+    potentially other AI features).
+:Default: ``None``
+:Type: str
+
+
+~~~~~~~~~~~~~~~~
+``openai_model``
+~~~~~~~~~~~~~~~~
+
+:Description:
+    OpenAI model to use for the wizard.
+:Default: ``gpt-4o``
+:Type: str
+
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``enable_tool_recommendations``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/lib/galaxy/config/__init__.py b/lib/galaxy/config/__init__.py
index 8ccc68ffb631..d253b3d0d9b6 100644
--- a/lib/galaxy/config/__init__.py
+++ b/lib/galaxy/config/__init__.py
@@ -1091,6 +1091,9 @@ def _process_config(self, kwargs: Dict[str, Any]) -> None:
self._process_celery_config()
+ # load in the chat_prompts if openai is enabled
+ self._load_chat_prompts()
+
self.pretty_datetime_format = expand_pretty_datetime_format(self.pretty_datetime_format)
try:
with open(self.user_preferences_extra_conf_path) as stream:
@@ -1251,6 +1254,23 @@ def _load_theme(path: str, theme_dict: dict):
if self.file_source_temp_dir:
self.file_source_temp_dir = os.path.abspath(self.file_source_temp_dir)
+    def _load_chat_prompts(self):
+        # Always define the attribute so downstream consumers can rely on it.
+        self.chat_prompts = {}
+        if self.openai_api_key:
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ chat_prompts_path = os.path.join(current_dir, "chat_prompts.json")
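+            # Expected file shape, as in lib/galaxy/config/chat_prompts.json:
+            #   {"prompts": {"tool_error": "<system prompt text>"}}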
+
+ if os.path.exists(chat_prompts_path):
+ try:
+ with open(chat_prompts_path, encoding="utf-8") as file:
+ data = json.load(file)
+ self.chat_prompts = data.get("prompts", {})
+ except json.JSONDecodeError as e:
+ log.error(f"JSON decoding error in chat prompts file: {e}")
+ except Exception as e:
+ log.error(f"An error occurred while reading chat prompts file: {e}")
+ else:
+ log.warning(f"Chat prompts file not found at {chat_prompts_path}")
+
def _process_celery_config(self):
if self.celery_conf and self.celery_conf.get("result_backend") is None:
# If the result_backend is not set, use a SQLite database in the data directory
diff --git a/lib/galaxy/config/chat_prompts.json b/lib/galaxy/config/chat_prompts.json
new file mode 100644
index 000000000000..93ff8e058ee7
--- /dev/null
+++ b/lib/galaxy/config/chat_prompts.json
@@ -0,0 +1,8 @@
+{
+ "prompts": {
+    "tool_error": "Adopt the persona of a Galaxy project expert who is able to easily explain complex error messages to novice users in a serious manner.\nYou will be provided with an errored output from a Galaxy tool and should explain in simple terms what the error is.\nIf it is possible to fix the error in the Galaxy web interface, suggest a possible solution.\nIf you are not totally sure how to fix the error, state that you are unable to help.\nPlease ensure your response is in well-formatted markdown."
+ }
+}
diff --git a/lib/galaxy/config/sample/galaxy.yml.sample b/lib/galaxy/config/sample/galaxy.yml.sample
index fa14f2238763..d2da6d1723ca 100644
--- a/lib/galaxy/config/sample/galaxy.yml.sample
+++ b/lib/galaxy/config/sample/galaxy.yml.sample
@@ -1,21 +1,21 @@
# Galaxy is configured by default to be usable in a single-user development
# environment. To tune the application for a multi-user production
# environment, see the documentation at:
-#
+#
# https://docs.galaxyproject.org/en/master/admin/production.html
-#
+#
# Throughout this sample configuration file, except where stated otherwise,
# uncommented values override the default if left unset, whereas commented
# values are set to the default value. Relative paths are relative to the root
# Galaxy directory.
-#
+#
# Examples of many of these options are explained in more detail in the Galaxy
# Community Hub.
-#
+#
# https://galaxyproject.org/admin/config
-#
+#
# Config hackers are encouraged to check there before asking for help.
-#
+#
# Configuration for Gravity process manager.
# ``uwsgi:`` section will be ignored if Galaxy is started via Gravity commands (e.g ``./run.sh``, ``galaxy`` or ``galaxyctl``).
gravity:
@@ -2882,6 +2882,13 @@ galaxy:
# as threshold (above threshold: regular select fields will be used)
#select_type_workflow_threshold: -1
+  # API key for OpenAI (https://openai.com/) to enable the wizard (and
+  # potentially other AI features)
+ #openai_api_key: null
+
+  # OpenAI model to use for the wizard.
+ #openai_model: gpt-4o
+
# Allow the display of tool recommendations in workflow editor and
# after tool execution. If it is enabled and set to true, please
# enable 'tool_recommendation_model_path' as well
diff --git a/lib/galaxy/config/schemas/config_schema.yml b/lib/galaxy/config/schemas/config_schema.yml
index f0ee832e34ec..e659efd6d21c 100644
--- a/lib/galaxy/config/schemas/config_schema.yml
+++ b/lib/galaxy/config/schemas/config_schema.yml
@@ -3923,6 +3923,19 @@ mapping:
use -1 (default) in order to always use the regular select fields,
use any other positive number as threshold (above threshold: regular select fields will be used)
+ openai_api_key:
+ type: str
+ required: false
+ desc: |
+      API key for OpenAI (https://openai.com/) to enable the wizard (and potentially other AI features).
+
+ openai_model:
+ type: str
+ default: gpt-4o
+ required: false
+ desc: |
+      OpenAI model to use for the wizard.
+
enable_tool_recommendations:
type: bool
default: false
diff --git a/lib/galaxy/dependencies/__init__.py b/lib/galaxy/dependencies/__init__.py
index d7a9d0740048..5431db647587 100644
--- a/lib/galaxy/dependencies/__init__.py
+++ b/lib/galaxy/dependencies/__init__.py
@@ -290,6 +290,9 @@ def check_influxdb(self):
def check_tensorflow(self):
return asbool(self.config["enable_tool_recommendations"])
+ def check_openai(self):
+ return self.config.get("openai_api_key", None) is not None
+
def check_weasyprint(self):
# See notes in ./conditional-requirements.txt for more information.
return os.environ.get("GALAXY_DEPENDENCIES_INSTALL_WEASYPRINT") == "1"
diff --git a/lib/galaxy/dependencies/conditional-requirements.txt b/lib/galaxy/dependencies/conditional-requirements.txt
index 168fea8adcd0..8351e2b01ba4 100644
--- a/lib/galaxy/dependencies/conditional-requirements.txt
+++ b/lib/galaxy/dependencies/conditional-requirements.txt
@@ -14,6 +14,7 @@ python-pam
galaxycloudrunner
pkce
total-perspective-vortex>=2.3.2,<3
+openai
# For file sources plugins
fs.webdavfs>=0.4.2 # type: webdav
diff --git a/lib/galaxy/managers/chat.py b/lib/galaxy/managers/chat.py
new file mode 100644
index 000000000000..ecf651602684
--- /dev/null
+++ b/lib/galaxy/managers/chat.py
@@ -0,0 +1,99 @@
+from typing import Union
+
+from sqlalchemy import (
+ and_,
+ select,
+)
+from sqlalchemy.exc import (
+ MultipleResultsFound,
+ NoResultFound,
+)
+
+from galaxy.exceptions import (
+ InconsistentDatabase,
+ InternalServerError,
+ RequestParameterInvalidException,
+)
+from galaxy.managers.context import ProvidesUserContext
+from galaxy.model import ChatExchange
+from galaxy.model.base import transaction
+from galaxy.util import unicodify
+
+
+class ChatManager:
+ """
+ Business logic for chat exchanges.
+ """
+
+ def create(self, trans: ProvidesUserContext, job_id: int, message: str) -> ChatExchange:
+ """
+        Create a new chat exchange in the DB. Currently these are *only* job-based
+        chat exchanges; this will need to be generalized down the road.
+
+        :param job_id: id of the job to associate the exchange with
+        :type job_id: int
+        :param message: the message to save in the DB
+        :type message: str
+ :returns: the created ChatExchange object
+ :rtype: galaxy.model.ChatExchange
+ :raises: InternalServerError
+ """
+ chat_exchange = ChatExchange(user=trans.user, job_id=job_id, message=message)
+ trans.sa_session.add(chat_exchange)
+ with transaction(trans.sa_session):
+ trans.sa_session.commit()
+ return chat_exchange
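+
+    # A sketch of the expected call pattern from the API layer, assuming
+    # ``trans`` carries the requesting user:
+    #
+    #     exchange = chat_manager.create(trans, job.id, answer)
+    #     exchange.messages[0].message  # -> answer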
+
+ def get(self, trans: ProvidesUserContext, job_id: int) -> Union[ChatExchange, None]:
+ """
+ Returns the chat response from the DB based on the given job id.
+ :param job_id: id of the job to load a response for from the DB
+ :type job_id: int
+ :returns: the loaded ChatExchange object
+ :rtype: galaxy.model.ChatExchange
+ :raises: InconsistentDatabase, InternalServerError
+ """
+ try:
+ stmt = select(ChatExchange).where(
+ and_(ChatExchange.job_id == job_id, ChatExchange.user_id == trans.user.id)
+ )
+ chat_response = trans.sa_session.execute(stmt).scalar_one()
+ except MultipleResultsFound:
+            # TODO: Unsure about this; it seems more applicable when fetching an exchange by its own id rather than by job id.
+ raise InconsistentDatabase("Multiple chat responses found with the same job id.")
+ except NoResultFound:
+ # TODO: Would there be cases where we raise an exception here? Or, is there a better way to return None?
+ # raise RequestParameterInvalidException("No accessible response found with the id provided.")
+ return None
+ except Exception as e:
+            raise InternalServerError(f"Error loading from the database: {unicodify(e)}")
+ return chat_response
+
+ def set_feedback_for_job(self, trans: ProvidesUserContext, job_id: int, feedback: int) -> ChatExchange:
+ """
+ Set the feedback for a chat response.
+        :param job_id: id of the job whose chat exchange receives the feedback
+        :type job_id: int
+ :param feedback: the feedback to save in the DB (0 or 1)
+ :type feedback: int
+ :returns: the updated ChatExchange object
+ :rtype: galaxy.model.ChatExchange
+ :raises: RequestParameterInvalidException
+ """
+
+        # TODO: Target feedback at specific messages once exchanges can hold more than one, instead of keying on the job.
+
+ # Validate the feedback; it should be 0 or 1
+ if feedback not in [0, 1]:
+ raise RequestParameterInvalidException("Feedback should be 0 or 1.")
+
+ chat_exchange = self.get(trans, job_id)
+
+ if not chat_exchange:
+ raise RequestParameterInvalidException("No accessible response found with the id provided.")
+
+ # There is only one message in an exchange currently, so we can set the feedback on the first message
+ chat_exchange.messages[0].feedback = feedback
+
+ with transaction(trans.sa_session):
+ trans.sa_session.commit()
+
+ return chat_exchange
diff --git a/lib/galaxy/model/__init__.py b/lib/galaxy/model/__init__.py
index f1015a284738..7eb5fa19338e 100644
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -805,6 +805,7 @@ class User(Base, Dictifiable, RepresentById):
back_populates="user", order_by=lambda: desc(UserAddress.update_time), cascade_backrefs=False
)
custos_auth: Mapped[List["CustosAuthnzToken"]] = relationship(back_populates="user")
+ chat_exchanges: Mapped[List["ChatExchange"]] = relationship(back_populates="user")
default_permissions: Mapped[List["DefaultUserPermissions"]] = relationship(back_populates="user")
groups: Mapped[List["UserGroupAssociation"]] = relationship(back_populates="user")
histories: Mapped[List["History"]] = relationship(
@@ -2972,6 +2973,43 @@ class GenomeIndexToolData(Base, RepresentById): # TODO: params arg is lost
user: Mapped[Optional["User"]] = relationship()
+class ChatExchange(Base, RepresentById):
+
+ __tablename__ = "chat_exchange"
+
+ id: Mapped[int] = mapped_column(primary_key=True)
+ user_id: Mapped[int] = mapped_column(ForeignKey("galaxy_user.id"), index=True, nullable=False)
+ job_id: Mapped[Optional[int]] = mapped_column(ForeignKey("job.id"), index=True, nullable=True)
+
+ user: Mapped["User"] = relationship(back_populates="chat_exchanges")
+ messages: Mapped[List["ChatExchangeMessage"]] = relationship(back_populates="chat_exchange")
+
+ def __init__(self, user, job_id=None, message=None, **kwargs):
+ self.user = user
+ self.job_id = job_id
+ self.messages = []
+ if message:
+ self.add_message(message)
+
+ def add_message(self, message):
+ self.messages.append(ChatExchangeMessage(message=message))
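+
+    # Note: ChatExchange(user, job_id=job.id, message=answer) creates the
+    # exchange together with its first ChatExchangeMessage in one step.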
+
+
+class ChatExchangeMessage(Base, RepresentById):
+ __tablename__ = "chat_exchange_message"
+
+ id: Mapped[int] = mapped_column(primary_key=True)
+ chat_exchange_id: Mapped[int] = mapped_column(ForeignKey("chat_exchange.id"), index=True)
+ create_time: Mapped[datetime] = mapped_column(default=now)
+ message: Mapped[str] = mapped_column(Text)
+ feedback: Mapped[Optional[int]] = mapped_column(Integer)
+ chat_exchange: Mapped["ChatExchange"] = relationship("ChatExchange", back_populates="messages")
+
+ def __init__(self, message, feedback=None):
+ self.message = message
+ self.feedback = feedback
+
+
class Group(Base, Dictifiable, RepresentById):
__tablename__ = "galaxy_group"
diff --git a/lib/galaxy/model/migrations/alembic/versions_gxy/cbc46035eba0_chat_exchange_storage.py b/lib/galaxy/model/migrations/alembic/versions_gxy/cbc46035eba0_chat_exchange_storage.py
new file mode 100644
index 000000000000..c5767ac7ace7
--- /dev/null
+++ b/lib/galaxy/model/migrations/alembic/versions_gxy/cbc46035eba0_chat_exchange_storage.py
@@ -0,0 +1,52 @@
+"""Chat exchange storage.
+
+Revision ID: cbc46035eba0
+Revises: a99a5b52ccb8
+Create Date: 2023-06-05 13:23:42.050738
+
+"""
+
+from sqlalchemy import (
+ Column,
+ DateTime,
+ ForeignKey,
+ Integer,
+ Text,
+)
+
+from galaxy.model.migrations.util import (
+ create_table,
+ drop_table,
+)
+
+# revision identifiers, used by Alembic.
+revision = "cbc46035eba0"
+down_revision = "a99a5b52ccb8"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ create_table(
+ "chat_exchange",
+ Column("id", Integer, primary_key=True),
+ Column("user_id", Integer, ForeignKey("galaxy_user.id")),
+ Column("job_id", Integer, ForeignKey("job.id"), nullable=True),
+ )
+
+ create_table(
+ "chat_exchange_message",
+ Column("id", Integer, primary_key=True),
+ Column("chat_exchange_id", Integer, ForeignKey("chat_exchange.id")),
+ Column("create_time", DateTime),
+ Column("message", Text),
+ Column("feedback", Integer, nullable=True),
+ )
+
+
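+# chat_exchange_message references chat_exchange, so the downgrade drops it first.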
+def downgrade():
+ drop_table("chat_exchange_message")
+ drop_table("chat_exchange")
diff --git a/lib/galaxy/schema/schema.py b/lib/galaxy/schema/schema.py
index 077dc8e29bb7..e758e03a6861 100644
--- a/lib/galaxy/schema/schema.py
+++ b/lib/galaxy/schema/schema.py
@@ -3707,6 +3707,19 @@ class MaterializeDatasetInstanceRequest(MaterializeDatasetInstanceAPIRequest):
history_id: DecodedDatabaseIdField
+class ChatPayload(Model):
+ query: str = Field(
+ ...,
+ title="Query",
+ description="The query to be sent to the chatbot.",
+ )
+ context: Optional[str] = Field(
+ default="",
+ title="Context",
+ description="The context for the chatbot.",
+ )
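+
+    # A minimal payload as the client serializes it (sketch):
+    #   {"query": "<tool error text>", "context": "tool_error"}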
+
+
class CreatePagePayload(PageSummaryBase):
content_format: PageContentFormat = ContentFormatField
content: Optional[str] = ContentField
diff --git a/lib/galaxy/webapps/galaxy/api/chat.py b/lib/galaxy/webapps/galaxy/api/chat.py
new file mode 100644
index 000000000000..de32b6a452f4
--- /dev/null
+++ b/lib/galaxy/webapps/galaxy/api/chat.py
@@ -0,0 +1,138 @@
+"""
+API Controller providing Chat functionality
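+
+A sketch of the round trip these endpoints implement (job ids are encoded
+Galaxy ids):
+
+    POST /api/chat?job_id=<job id>   with body {"query": "...", "context": ""}
+        -> the assistant's reply, a JSON-encoded string
+    PUT /api/chat/<job id>/feedback?feedback=1
+        -> the stored feedback value (0 or 1)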
+"""
+
+import logging
+from typing import (
+ Optional,
+ Union,
+)
+
+from fastapi import Path
+from typing_extensions import Annotated
+
+from galaxy.config import GalaxyAppConfiguration
+from galaxy.exceptions import ConfigurationError
+from galaxy.managers.chat import ChatManager
+from galaxy.managers.context import ProvidesUserContext
+from galaxy.managers.jobs import JobManager
+from galaxy.schema.fields import DecodedDatabaseIdField
+from galaxy.schema.schema import ChatPayload
+from galaxy.webapps.galaxy.api import (
+ depends,
+ DependsOnTrans,
+ Router,
+)
+
+try:
+ import openai
+except ImportError:
+ openai = None
+
+log = logging.getLogger(__name__)
+
+router = Router(tags=["chat"])
+
+DEFAULT_PROMPT = """
+Please only say that something went wrong when configuring the AI prompt in your response.
+"""
+
+JobIdPathParam = Optional[
+ Annotated[
+ DecodedDatabaseIdField,
+ Path(title="Job ID", description="The Job ID the chat exchange is linked to."),
+ ]
+]
+
+
+@router.cbv
+class ChatAPI:
+ config: GalaxyAppConfiguration = depends(GalaxyAppConfiguration)
+ chat_manager: ChatManager = depends(ChatManager)
+ job_manager: JobManager = depends(JobManager)
+
+ @router.post("/api/chat")
+ def query(
+ self,
+ job_id: JobIdPathParam,
+ payload: ChatPayload,
+ trans: ProvidesUserContext = DependsOnTrans,
+ ) -> str:
+ """We're off to ask the wizard"""
+ # Currently job-based chat exchanges are the only ones supported,
+ # and will only have the one message.
+ job = self.job_manager.get_accessible_job(trans, job_id)
+ if job:
+ # If there's an existing response for this job, just return that one for now.
+ # TODO: Support regenerating the response as a new message, and
+ # asking follow-up questions.
+ existing_response = self.chat_manager.get(trans, job.id)
+ if existing_response and existing_response.messages[0]:
+ return existing_response.messages[0].message
+
+ self._ensure_openai_configured()
+
+ messages = self._build_messages(payload, trans)
+ response = self._call_openai(messages)
+ answer = response.choices[0].message.content
+
+ if job:
+ self.chat_manager.create(trans, job.id, answer)
+
+ return answer
+
+ @router.put("/api/chat/{job_id}/feedback")
+ def feedback(
+ self,
+ job_id: JobIdPathParam,
+ feedback: int,
+ trans: ProvidesUserContext = DependsOnTrans,
+ ) -> Union[int, None]:
+ """Provide feedback on the chatbot response."""
+ job = self.job_manager.get_accessible_job(trans, job_id)
+ chat_response = self.chat_manager.set_feedback_for_job(trans, job.id, feedback)
+ return chat_response.messages[0].feedback
+
+ def _ensure_openai_configured(self):
+ """Ensure OpenAI is available and configured with an API key."""
+ if openai is None:
+ raise ConfigurationError("OpenAI is not installed. Please install openai to use this feature.")
+ if self.config.openai_api_key is None:
+ raise ConfigurationError("OpenAI is not configured for this instance.")
+ openai.api_key = self.config.openai_api_key
+
+ def _get_system_prompt(self) -> str:
+ """Get the system prompt for OpenAI."""
+ return self.config.chat_prompts.get("tool_error", DEFAULT_PROMPT)
+
+ def _build_messages(self, payload: ChatPayload, trans: ProvidesUserContext) -> list:
+ """Build the message array to send to OpenAI."""
+ messages = [
+ {"role": "system", "content": self._get_system_prompt()},
+ {"role": "user", "content": payload.query},
+ ]
+
+ user_msg = self._get_user_context_message(trans)
+ if user_msg:
+ messages.append({"role": "system", "content": user_msg})
+
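+        # For an anonymous user the resulting array is, e.g.:
+        #   [{"role": "system", "content": <tool_error prompt>},
+        #    {"role": "user", "content": payload.query},
+        #    {"role": "system", "content": "You will address the user as Anonymous User"}]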
+ return messages
+
+ def _get_user_context_message(self, trans: ProvidesUserContext) -> str:
+ """Generate a user context message based on the user's information."""
+ user = trans.user
+ if user:
+            log.debug(f"Chat user: {user.username}")
+ return f"You will address the user as {user.username}"
+ return "You will address the user as Anonymous User"
+
+ def _call_openai(self, messages: list):
+ """Send a chat request to OpenAI and handle exceptions."""
+ try:
+ return openai.chat.completions.create(
+ model=self.config.openai_model,
+ messages=messages,
+ )
+ except Exception as e:
+ log.error(f"Error calling OpenAI: {e}")
+ raise ConfigurationError("An error occurred while communicating with OpenAI.")
diff --git a/lib/galaxy/webapps/galaxy/buildapp.py b/lib/galaxy/webapps/galaxy/buildapp.py
index e1897420f9a5..9c1f9de277e6 100644
--- a/lib/galaxy/webapps/galaxy/buildapp.py
+++ b/lib/galaxy/webapps/galaxy/buildapp.py
@@ -284,6 +284,7 @@ def app_pair(global_conf, load_app_kwds=None, wsgi_preflight=True, **kwargs):
webapp.add_client_route("/collection/{collection_id}/edit")
webapp.add_client_route("/jobs/submission/success")
webapp.add_client_route("/jobs/{job_id}/view")
+ webapp.add_client_route("/wizard")
webapp.add_client_route("/workflows/list")
webapp.add_client_route("/workflows/list_published")
webapp.add_client_route("/workflows/list_shared_with_me")