diff --git a/.github/workflows/mysql8-migrations.yml b/.github/workflows/mysql8-migrations.yml
index 479efe72..58b04ee1 100644
--- a/.github/workflows/mysql8-migrations.yml
+++ b/.github/workflows/mysql8-migrations.yml
@@ -52,7 +52,7 @@ jobs:
           pip uninstall -y mysqlclient
           pip install --no-binary mysqlclient mysqlclient
           pip uninstall -y xmlsec
-          pip install --no-binary xmlsec xmlsec
+          pip install --no-binary xmlsec xmlsec==1.3.13

       - name: Initiate Services
         run: |
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 0eb6b807..f8cbc3ca 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -13,6 +13,10 @@ Change Log

 Unreleased

+[1.51.0] - 2024-07-03
+---------------------
+* feat: Replaced client for ai chat
+
 [1.50.0] - 2024-03-27
 ---------------------
 * feat: Skill validation can be disbaled for a course or an organization
diff --git a/taxonomy/__init__.py b/taxonomy/__init__.py
index 4d14b545..cca21f13 100644
--- a/taxonomy/__init__.py
+++ b/taxonomy/__init__.py
@@ -15,6 +15,6 @@
 # 2. MINOR version when you add functionality in a backwards compatible manner, and
 # 3. PATCH version when you make backwards compatible bug fixes.
 # More details can be found at https://semver.org/
-__version__ = '1.50.0'
+__version__ = '1.51.0'

 default_app_config = 'taxonomy.apps.TaxonomyConfig'  # pylint: disable=invalid-name
diff --git a/taxonomy/openai/client.py b/taxonomy/openai/client.py
index 10521e6b..a3278d1e 100644
--- a/taxonomy/openai/client.py
+++ b/taxonomy/openai/client.py
@@ -1,25 +1,45 @@
-"""openai client"""
+"""CHAT_COMPLETION_API client"""
+import json
+import logging

-import openai
+import requests
+from requests.exceptions import ConnectTimeout

 from django.conf import settings

-openai.api_key = settings.OPENAI_API_KEY
+log = logging.getLogger(__name__)


 def chat_completion(prompt):
     """
-    Use chatGPT https://api.openai.com/v1/chat/completions endpoint to generate a response.
-
+    Pass message list to chat endpoint, as defined by the CHAT_COMPLETION_API setting.
     Arguments:
         prompt (str): chatGPT prompt
     """
-    response = openai.ChatCompletion.create(
-        model="gpt-3.5-turbo",
-        messages=[
-            {"role": "user", "content": prompt},
-        ]
-    )
+    completion_endpoint = getattr(settings, 'CHAT_COMPLETION_API', None)
+    completion_endpoint_key = getattr(settings, 'CHAT_COMPLETION_API_KEY', None)
+    if completion_endpoint and completion_endpoint_key:
+        headers = {'Content-Type': 'application/json', 'x-api-key': completion_endpoint_key}
+        connect_timeout = getattr(settings, 'CHAT_COMPLETION_API_CONNECT_TIMEOUT', 1)
+        read_timeout = getattr(settings, 'CHAT_COMPLETION_API_READ_TIMEOUT', 15)
+        body = {'message_list': [{'role': 'assistant', 'content': prompt},]}
+        try:
+            response = requests.post(
+                completion_endpoint,
+                headers=headers,
+                data=json.dumps(body),
+                timeout=(connect_timeout, read_timeout)
+            )
+            chat = response.json().get('content')
+        except (ConnectTimeout, ConnectionError) as e:
+            error_message = str(e)
+            connection_message = 'Failed to connect to chat completion API.'
+            log.error(
+                '%(connection_message)s %(error)s',
+                {'connection_message': connection_message, 'error': error_message}
+            )
+            chat = connection_message
+    else:
+        chat = 'Completion endpoint is not defined.'
-    content = response['choices'][0]['message']['content']
-    return content
+    return chat
diff --git a/test_settings.py b/test_settings.py
index 2e7b88eb..282c66d2 100644
--- a/test_settings.py
+++ b/test_settings.py
@@ -114,7 +114,8 @@ def root(*args):
 SKILLS_IGNORED_THRESHOLD = 10
 SKILLS_IGNORED_RATIO_THRESHOLD = 0.8

-OPENAI_API_KEY = 'I am a key'
+CHAT_COMPLETION_API = 'http://test.chat.ai'
+CHAT_COMPLETION_API_KEY = 'test chat completion api key'

 JOB_DESCRIPTION_PROMPT = 'Generate a description for {job_name} job role.'
 JOB_TO_JOB_DESCRIPTION_PROMPT = 'How can a {current_job_name} switch to {future_job_name} job role.'
diff --git a/tests/openai/__init__.py b/tests/openai/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/openai/test_client.py b/tests/openai/test_client.py
new file mode 100644
index 00000000..209836db
--- /dev/null
+++ b/tests/openai/test_client.py
@@ -0,0 +1,52 @@
+"""
+Tests for chat completion client.
+"""
+import responses
+from mock import patch
+
+from django.conf import settings
+
+from taxonomy.openai.client import chat_completion
+from test_utils.testcase import TaxonomyTestCase
+
+
+class TestChatCompletionClient(TaxonomyTestCase):
+    """
+    Validate chat_completion client.
+    """
+    @responses.activate
+    def test_client(self):
+        """
+        Test that the chat completion client works as expected.
+        """
+        chat_prompt = 'how many courses are offered by edx in the data science area'
+        expected_chat_response = {
+            "role": "assistant",
+            "content": "edx offers 500 courses in the data science area"
+        }
+        responses.add(
+            method=responses.POST,
+            url=settings.CHAT_COMPLETION_API,
+            json=expected_chat_response,
+        )
+        chat_response = chat_completion(chat_prompt)
+        self.assertEqual(chat_response, expected_chat_response['content'])
+
+    @patch('taxonomy.openai.client.requests.post')
+    def test_client_exceptions(self, post_mock):
+        """
+        Test that the chat completion client handles exceptions as expected.
+        """
+        chat_prompt = 'how many courses are offered by edx in the data science area'
+        post_mock.side_effect = ConnectionError()
+        chat_response = chat_completion(chat_prompt)
+        self.assertEqual(chat_response, 'Failed to connect to chat completion API.')
+
+    def test_client_missing_settings(self):
+        """
+        Test that the chat completion client handles missing settings as expected.
+        """
+        chat_prompt = 'how many courses are offered by edx in the data science area'
+        settings.CHAT_COMPLETION_API_KEY = None
+        chat_response = chat_completion(chat_prompt)
+        self.assertEqual(chat_response, 'Completion endpoint is not defined.')
diff --git a/tests/test_models.py b/tests/test_models.py
index 3fb2d335..fb5c6065 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -346,30 +346,26 @@ def test_string_representation(self):
         assert expected_repr == job.__repr__()

     @pytest.mark.use_signals
-    @patch('taxonomy.openai.client.openai.ChatCompletion.create')
+    @patch('taxonomy.openai.client.requests.post')
     @patch('taxonomy.utils.generate_and_store_job_description', wraps=generate_and_store_job_description)
     @patch('taxonomy.signals.handlers.generate_job_description.delay', wraps=generate_job_description)
     def test_chat_completion_is_called(  # pylint: disable=invalid-name
             self,
             mocked_generate_job_description_task,
             mocked_generate_and_store_job_description,
-            mocked_chat_completion
+            mock_requests
     ):
         """
         Verify that complete flow works as expected when a Job model object is created.
         """
         ai_response = 'One who manages a Computer Network.'
-        mocked_chat_completion.return_value = {
-            'choices': [{
-                'message': {
-                    'content': ai_response
-                }
-            }]
+        mock_requests.return_value.json.return_value = {
+            "role": "assistant",
+            "content": ai_response
         }
         job_external_id = '1111'
         job_name = 'Network Admin'
-        prompt = settings.JOB_DESCRIPTION_PROMPT.format(job_name=job_name)

         Job(external_id=job_external_id, name=job_name).save()
         job = Job.objects.get(external_id=job_external_id)
@@ -377,10 +373,7 @@ def test_chat_completion_is_called(  # pylint: disable=invalid-name
         assert job.description == ai_response
         mocked_generate_job_description_task.assert_called_once_with(job_external_id, job_name)
         mocked_generate_and_store_job_description.assert_called_once_with(job_external_id, job_name)
-        mocked_chat_completion.assert_called_once_with(
-            model='gpt-3.5-turbo',
-            messages=[{'role': 'user', 'content': prompt}]
-        )
+        mock_requests.assert_called_once()

     @pytest.mark.use_signals
     @patch('taxonomy.utils.chat_completion')
diff --git a/tests/test_views.py b/tests/test_views.py
index 62d28ece..388844b8 100644
--- a/tests/test_views.py
+++ b/tests/test_views.py
@@ -646,7 +646,7 @@ def setUp(self) -> None:
         self.client.login(username=self.user.username, password=USER_PASSWORD)
         self.view_url = '/api/v1/job-path/'

-    @patch('taxonomy.openai.client.openai.ChatCompletion.create')
+    @patch('taxonomy.openai.client.requests.post')
     @patch(
         'taxonomy.api.v1.serializers.generate_and_store_job_to_job_description',
         wraps=generate_and_store_job_to_job_description
@@ -654,18 +654,15 @@ def setUp(self) -> None:
     )
     def test_job_path_api(  # pylint: disable=invalid-name
             self,
             mocked_generate_and_store_job_to_job_description,
-            mocked_chat_completion
+            mock_requests
     ):
         """
         Verify that job path API returns the expected response.
         """
         ai_response = 'You can not switch from your current job to future job'
-        mocked_chat_completion.return_value = {
-            'choices': [{
-                'message': {
-                    'content': ai_response
-                }
-            }]
+        mock_requests.return_value.json.return_value = {
+            "role": "assistant",
+            "content": ai_response
         }
         query_params = {
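Reviewer note (not part of the diff): a minimal sketch of how a consuming Django service would configure and call the new client introduced above. The endpoint URL, API key, and job name are placeholder values; the two timeout settings are optional and fall back to the client defaults of 1 and 15 seconds.

# settings.py of a consuming Django service (values below are placeholders):
CHAT_COMPLETION_API = 'https://example.internal/chat/completions'  # hypothetical endpoint
CHAT_COMPLETION_API_KEY = 'replace-with-real-key'                  # hypothetical key
CHAT_COMPLETION_API_CONNECT_TIMEOUT = 1   # seconds (optional)
CHAT_COMPLETION_API_READ_TIMEOUT = 15     # seconds (optional)

# Call sites are unchanged; only the transport behind chat_completion changed:
from django.conf import settings
from taxonomy.openai.client import chat_completion

description = chat_completion(settings.JOB_DESCRIPTION_PROMPT.format(job_name='Network Admin'))
# On connection failure the client logs the error and returns the string
# 'Failed to connect to chat completion API.'; if the settings above are missing,
# it returns 'Completion endpoint is not defined.' instead of raising.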