From 7b8ab006463c1c662fcd56fdb7e82371fcd33778 Mon Sep 17 00:00:00 2001
From: "PJLAB\\lijiaying"
Date: Thu, 1 Feb 2024 16:57:14 +0800
Subject: [PATCH 1/8] [feature] support gemini stream request

---
 openaoe/backend/api/route_google.py       |   2 +-
 openaoe/backend/requirements.txt          |   1 +
 openaoe/backend/service/base.py           |  93 ++++++++++++++++++++
 openaoe/backend/service/service_google.py | 101 +++++++++++++++++----
 openaoe/backend/util/log.py               |   9 ++
 5 files changed, 182 insertions(+), 24 deletions(-)
 create mode 100644 openaoe/backend/service/base.py

diff --git a/openaoe/backend/api/route_google.py b/openaoe/backend/api/route_google.py
index 37d90d6..ffd19de 100644
--- a/openaoe/backend/api/route_google.py
+++ b/openaoe/backend/api/route_google.py
@@ -13,5 +13,5 @@ async def palm_chat(body: GooglePalmChatBody):
     @param body: request body
     @return: response
     """
-    ret = palm_chat_svc(body)
+    ret = await palm_chat_svc(body)
     return ret
diff --git a/openaoe/backend/requirements.txt b/openaoe/backend/requirements.txt
index 0454124..ba227db 100644
--- a/openaoe/backend/requirements.txt
+++ b/openaoe/backend/requirements.txt
@@ -12,3 +12,4 @@ pyyaml==6.0.1
 httpx==0.25.0
 sse-starlette==1.8.2
 anyio==3.7.1
+jsonstreamer==1.3.8
\ No newline at end of file
diff --git a/openaoe/backend/service/base.py b/openaoe/backend/service/base.py
new file mode 100644
index 0000000..35f93e0
--- /dev/null
+++ b/openaoe/backend/service/base.py
@@ -0,0 +1,93 @@
+#! /bin/python3
+import json
+import sys
+import traceback
+from io import StringIO
+
+import httpx
+from fastapi.encoders import jsonable_encoder
+from jsonstreamer import ObjectStreamer
+
+from openaoe.backend.config.constant import DEFAULT_TIMEOUT_SECONDS
+from openaoe.backend.model.aoe_response import AOEResponse
+from openaoe.backend.util.log import log
+
+logger = log(__name__)
+
+
+async def base_request(provider, url, method: str, headers: dict, body=None, timeout=DEFAULT_TIMEOUT_SECONDS,
+                       params=None,
+                       files=None) -> AOEResponse:
+    response = AOEResponse()
+
+    headers_pure = {}
+    for k, v in headers.items():
+        k = k.lower()
+        if k == "user-agent" or k == "host" or "ip" in k:
+            continue
+        headers_pure[k] = v
+
+    body_str = str(body)
+    if "content-type" in headers and "multipart/form-data" in headers["content-type"]:
+        body_str = "image"
+    if len(body_str) > 200:
+        body_str = body_str[:200]
+
+    try:
+        async with httpx.AsyncClient() as client:
+            proxy = await client.request(method, url, headers=headers_pure, json=body, timeout=timeout,
+                                         params=params, files=files)
+            response.data = proxy.content
+            try:
+                response.data = json.loads(response.data)
+            except Exception:
+                response.data = proxy.content
+
+    except Exception as e:
+        response.msg = str(e)
+        logger.error(f"[{provider}] url: {url}, method: {method}, headers: {jsonable_encoder(headers)}, "
+                     f"body: {body_str} failed, response: {jsonable_encoder(response)}")
+    return response
+
+
+async def base_stream(provider, url, method: str, headers: dict, stream_callback, body=None,
+                      timeout=DEFAULT_TIMEOUT_SECONDS,
+                      params=None,
+                      files=None):
+    headers_pure = {
+        "Content-Type": "application/json"
+    }
+    for k, v in headers.items():
+        k = k.lower()
+        if k == "user-agent" or k == "host" or "ip" in k:
+            continue
+        headers_pure[k] = v
+
+    body_str = str(jsonable_encoder(body))
+    if "content-type" in headers and "multipart/form-data" in headers["content-type"]:
+        body_str = "image"
+    if len(body_str) > 200:
+        body_str = body_str[:200]
+
+    try:
+        with httpx.stream(method, url, json=body, params=params, files=files, headers=headers_pure,
+                          timeout=timeout) as 
res: + # stream parser + streamer = ObjectStreamer() + sys.stdout = mystdout = StringIO() + streamer.add_catch_all_listener(stream_callback) + + for text in res.iter_text(): + streamer.consume(text) + yield mystdout.getvalue() + # clear printed string + sys.stdout.seek(0) + sys.stdout.truncate() + + except Exception as e: + print(traceback.format_exc()) + response = AOEResponse() + response.msg = str(e) + logger.error(f"[{provider}] url: {url}, method: {method}, headers: {jsonable_encoder(headers_pure)}, " + f"body: {body_str} failed, response: {jsonable_encoder(response)}") + yield response diff --git a/openaoe/backend/service/service_google.py b/openaoe/backend/service/service_google.py index 40da4ca..aeb8662 100644 --- a/openaoe/backend/service/service_google.py +++ b/openaoe/backend/service/service_google.py @@ -1,40 +1,29 @@ -import json - -import requests +from jsonstreamer import ObjectStreamer +from sse_starlette import EventSourceResponse from openaoe.backend.config.biz_config import get_api_key, get_base_url from openaoe.backend.config.constant import * from openaoe.backend.model.aoe_response import AOEResponse from openaoe.backend.model.google import GooglePalmChatBody +from openaoe.backend.service.base import base_request, base_stream from openaoe.backend.util.log import log logger = log(__name__) -def palm_chat_svc(body: GooglePalmChatBody): +async def palm_chat_svc(body: GooglePalmChatBody): """ chat logic for google PaLM model """ - api_key = get_api_key(PROVIDER_GOOGLE, body.model) - url = get_base_url(PROVIDER_GOOGLE, body.model) - url = f"{url}/google/v1beta2/models/{body.model}:generateMessage?key={api_key}" - messages = [ - {"content": msg.content, "author": msg.author} - for msg in body.prompt.messages or [] - ] - body = { - "prompt": { - "messages": messages - }, - "temperature": body.temperature, - "candidate_count": body.candidate_count, - "model": body.model - } + url, params, request_body = _construct_request_data(body) + + if "gemini" in body.model: + return EventSourceResponse( + base_stream(PROVIDER_GOOGLE, url, "post", {}, _catch_all, body=request_body, params=params)) + try: - response_json = requests.post( - url=url, - data=json.dumps(body) - ).json() + response = await base_request(PROVIDER_GOOGLE, url, "post", {}, request_body, params=params) + response_json = response.data if response_json.get('error') is not None: err_msg = response_json.get('error').get("message") return AOEResponse( @@ -57,3 +46,69 @@ def palm_chat_svc(body: GooglePalmChatBody): data=str(e) ) return base + + +def _construct_request_data(body: GooglePalmChatBody): + model = body.model + api_base = get_base_url(PROVIDER_GOOGLE, body.model) + api_key = get_api_key(PROVIDER_GOOGLE, body.model) + + if "gemini" in model: + # specially process gemini request + url = f"{api_base}/v1beta/models/{model}:streamGenerateContent" + params = { + "key": api_key + } + + if body.prompt.messages[0].author == "1": + body.prompt.messages = body.prompt.messages[1:] + contents = [ + { + "role": "user" if msg.author == "0" else "model", + "parts": [{ + "text": msg.content + }] + } + for msg in body.prompt.messages + ] + body = { + "contents": contents, + "generationConfig": { + "temperature": body.temperature, + "candidateCount": 1 + } + } + else: + url = f"{api_base}/google/v1beta2/models/{model}:generateMessage?key={api_key}" + params = { + "key": api_key + } + messages = [ + {"content": msg.content, "author": msg.author} + for msg in body.prompt.messages or [] + ] + body = { + "prompt": { + "messages": 
messages
+        },
+        "temperature": body.temperature,
+        "candidate_count": body.candidate_count,
+        "model": body.model
+    }
+    return url, params, body
+
+
+def _catch_all(event_name, *args):
+    if event_name == ObjectStreamer.PAIR_EVENT and args[0] == "text":
+        print(args[1])
+        return
+
+    elif event_name != ObjectStreamer.ELEMENT_EVENT:
+        return
+
+    for item in args:
+        try:
+            text = item["candidates"][0]["content"]["parts"][0]["text"]
+            print(text)
+        except Exception:
+            logger.warning(f"parse error, raw: {item}")
diff --git a/openaoe/backend/util/log.py b/openaoe/backend/util/log.py
index f3db868..2b34450 100644
--- a/openaoe/backend/util/log.py
+++ b/openaoe/backend/util/log.py
@@ -17,4 +17,13 @@ def log(name):
     return logger
 
 
+def clear_other_log():
+    for name, item in logging.Logger.manager.loggerDict.items():
+        if not isinstance(item, logging.Logger):
+            continue
+        if "aoe" not in name:
+            item.setLevel(logging.CRITICAL)
+
+
+clear_other_log()
 logger = log("util")

From e6d925e9811fd77beb8d55b1bee3efc841ae6be8 Mon Sep 17 00:00:00 2001
From: Echo-minn
Date: Thu, 1 Feb 2024 17:45:21 +0800
Subject: [PATCH 2/8] feat: mod streamModels

---
 .../global-config/global-config-context.tsx        |  2 +-
 .../src/components/global-config/global-config.tsx | 16 ++++++++--------
 .../components/chat-operations/chat-operation.tsx  |  4 ++--
 .../chat/components/prompt-input/prompt-input.tsx  |  6 +++---
 4 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/openaoe/frontend/src/components/global-config/global-config-context.tsx b/openaoe/frontend/src/components/global-config/global-config-context.tsx
index deb404b..c63231d 100644
--- a/openaoe/frontend/src/components/global-config/global-config-context.tsx
+++ b/openaoe/frontend/src/components/global-config/global-config-context.tsx
@@ -2,7 +2,7 @@ import React from 'react';
 
 export const DefaultConfigInfo = {
     models: null,
-    streamProviders: []
+    streamModels: []
 };
 
 export const GlobalConfigContext = React.createContext(DefaultConfigInfo);
diff --git a/openaoe/frontend/src/components/global-config/global-config.tsx b/openaoe/frontend/src/components/global-config/global-config.tsx
index c79f1fa..0668bee 100644
--- a/openaoe/frontend/src/components/global-config/global-config.tsx
+++ b/openaoe/frontend/src/components/global-config/global-config.tsx
@@ -11,7 +11,7 @@ export interface GlobalInfoProps {
 
 const GlobalConfig: FC<GlobalInfoProps> = ({ children }) => {
     const [models, setModels] = useState(defaultModels);
-    const [streamProviders, setStreamProviders] = useState(STREAM_BOT);
+    const [streamModels, setStreamProviders] = useState(STREAM_BOT);
     const [loading, setLoading] = useState(true);
 
     useEffect(() => {
@@ -20,11 +20,13 @@ const GlobalConfig: FC<GlobalInfoProps> = ({ children }) => {
             .then(res => res.json())
             .then(res => {
                 if (res && typeof res.models === 'object') {
-                    setModels(res.models);
+                    const rspModels = res.models;
                     const streamArray = [];
-                    Object.keys(models).forEach((model) => {
-                        if (models[model].webui.isStream !== false && !streamArray.includes(models[model].provider)) {
-                            streamArray.push(models[model].provider);
+                    setModels(rspModels);
+                    // stream status for every model
+                    Object.keys(rspModels).forEach((model) => {
+                        if (rspModels[model].webui.isStream !== false && !streamArray.includes(model)) {
+                            streamArray.push(model);
                         }
                     });
                     if (streamArray.length > 0) {
@@ -42,7 +44,7 @@ const GlobalConfig: FC<GlobalInfoProps> = ({ children }) => {
 
     const values = useMemo(() => ({
         models,
-        streamProviders,
-    }), [models]);
+        streamModels,
+    }), [models, streamModels]);
 
     return (
diff --git 
a/openaoe/frontend/src/pages/chat/components/chat-operations/chat-operation.tsx b/openaoe/frontend/src/pages/chat/components/chat-operations/chat-operation.tsx index 364f46f..834119c 100644 --- a/openaoe/frontend/src/pages/chat/components/chat-operations/chat-operation.tsx +++ b/openaoe/frontend/src/pages/chat/components/chat-operations/chat-operation.tsx @@ -121,7 +121,7 @@ const RetryIcon = () => { ); }; const ChatOperation = (props: ChatOperationProps) => { - const { models, streamProviders } = useContext(GlobalConfigContext); + const { models, streamModels } = useContext(GlobalConfigContext); const { modelName } = props; const chatStore = useChatStore(); const { sessions } = chatStore; @@ -165,7 +165,7 @@ const ChatOperation = (props: ChatOperationProps) => { const handleRetry = () => { const model = currSession.name === SERIAL_SESSION ? botStore.currentBot : chatStore.lastBotMessage(currSession.name).model; const provider = models[model]?.provider || ''; - const isStream = streamProviders.includes(provider); + const isStream = streamModels.includes(model); chatStore.retry(currSession.name, provider, model, isStream); }; diff --git a/openaoe/frontend/src/pages/chat/components/prompt-input/prompt-input.tsx b/openaoe/frontend/src/pages/chat/components/prompt-input/prompt-input.tsx index 62699cf..341374f 100644 --- a/openaoe/frontend/src/pages/chat/components/prompt-input/prompt-input.tsx +++ b/openaoe/frontend/src/pages/chat/components/prompt-input/prompt-input.tsx @@ -25,7 +25,7 @@ const BOT_PLACEHOLDER = { }; const PromptInput = () => { - const { models, streamProviders } = useContext(GlobalConfigContext); + const { models, streamModels } = useContext(GlobalConfigContext); const chatStore = useChatStore(); const botStore = useBotStore(); const configStore = useConfigStore(); @@ -108,12 +108,12 @@ const PromptInput = () => { handleContentChange(''); if (configStore.mode === SERIAL_MODE) { const provider = models[botStore.currentBot]?.provider; - const isStream = streamProviders.includes(provider); + const isStream = streamModels.includes(botStore.currentBot); chatStore.onUserInput(currPrompt, provider, botStore.currentBot, SERIAL_SESSION, isStream); } else { botStore.chosenBotNames.forEach((botName) => { const provider = models[botName]?.provider; - const isStream = streamProviders.includes(provider); + const isStream = streamModels.includes(botName); chatStore.onUserInput(currPrompt, provider, botName, botName, isStream); }); } From 5a93c167a1a74e5e5878328f4a608ee8e5c6d8fa Mon Sep 17 00:00:00 2001 From: "PJLAB\\lijiaying" Date: Thu, 1 Feb 2024 18:28:09 +0800 Subject: [PATCH 3/8] [feature] support gemini stream request --- openaoe/backend/model/aoe_response.py | 9 ++++++++- openaoe/backend/service/base.py | 17 +++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/openaoe/backend/model/aoe_response.py b/openaoe/backend/model/aoe_response.py index 9013899..1371a45 100644 --- a/openaoe/backend/model/aoe_response.py +++ b/openaoe/backend/model/aoe_response.py @@ -1,6 +1,7 @@ -from pydantic import BaseModel from typing import Optional +from pydantic import BaseModel + class AOEResponse(BaseModel): """ @@ -11,3 +12,9 @@ class AOEResponse(BaseModel): data: Optional[object] = None +class StreamResponse(BaseModel): + """ + Standard OpenAOE stream response + """ + success: Optional[bool] = True + msg: Optional[str] = "" diff --git a/openaoe/backend/service/base.py b/openaoe/backend/service/base.py index 35f93e0..3279322 100644 --- a/openaoe/backend/service/base.py +++ 
b/openaoe/backend/service/base.py @@ -9,7 +9,7 @@ from jsonstreamer import ObjectStreamer from openaoe.backend.config.constant import DEFAULT_TIMEOUT_SECONDS -from openaoe.backend.model.aoe_response import AOEResponse +from openaoe.backend.model.aoe_response import AOEResponse, StreamResponse from openaoe.backend.util.log import log logger = log(__name__) @@ -79,15 +79,20 @@ async def base_stream(provider, url, method: str, headers: dict, stream_callback for text in res.iter_text(): streamer.consume(text) - yield mystdout.getvalue() + res = mystdout.getvalue() + stream_res = json.dumps(jsonable_encoder(StreamResponse(msg=res))) + # format res + yield stream_res # clear printed string sys.stdout.seek(0) sys.stdout.truncate() except Exception as e: print(traceback.format_exc()) - response = AOEResponse() - response.msg = str(e) + res = json.dumps(jsonable_encoder(StreamResponse( + success=False, + msg=str(e) + ))) logger.error(f"[{provider}] url: {url}, method: {method}, headers: {jsonable_encoder(headers_pure)}, " - f"body: {body_str} failed, response: {jsonable_encoder(response)}") - yield response + f"body: {body_str} failed, response: {res}") + yield res From 3366c7c6dd74c4bc39df5b7062d6dd6dfcfed17a Mon Sep 17 00:00:00 2001 From: Echo-minn Date: Fri, 2 Feb 2024 10:41:34 +0800 Subject: [PATCH 4/8] feat: data.success.toString() --- openaoe/frontend/src/store/chat.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/openaoe/frontend/src/store/chat.ts b/openaoe/frontend/src/store/chat.ts index 308348a..d40a271 100644 --- a/openaoe/frontend/src/store/chat.ts +++ b/openaoe/frontend/src/store/chat.ts @@ -116,7 +116,6 @@ export const useChatStore = create()( onNewMessage(message, sessionIndex = 0) { const session = get().sessions[sessionIndex]; session.messages.push(message); - console.log('[BOT] onNewMessage: ', message, sessionIndex); set(() => ({ sessions: get().sessions })); }, deleteMessage(sessionName, messageIndex) { @@ -251,7 +250,7 @@ export const useChatStore = create()( onmessage: (event) => { if (isStream && typeof event.data === 'string' && event.data.charAt(0) === '{') { const data = JSON.parse(event.data); - if (data && data.success === 'true') { + if (data && data.success.toString() === 'true') { if (data.stop_reason === 'max_token') { // max_token means abnormal interruption botMessage.stream = false; get().updateMessage(sessionIndex, messageIndex, (message) => { From 9af88f202d5c18c85d60e17f8a55133a2ff7b306 Mon Sep 17 00:00:00 2001 From: "PJLAB\\lijiaying" Date: Fri, 2 Feb 2024 16:05:57 +0800 Subject: [PATCH 5/8] 1. base_stream check status_code first 2. 
fix palm api bug --- openaoe/backend/service/base.py | 3 +++ openaoe/backend/service/service_google.py | 15 ++++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/openaoe/backend/service/base.py b/openaoe/backend/service/base.py index 3279322..27573db 100644 --- a/openaoe/backend/service/base.py +++ b/openaoe/backend/service/base.py @@ -72,6 +72,9 @@ async def base_stream(provider, url, method: str, headers: dict, stream_callback try: with httpx.stream(method, url, json=body, params=params, files=files, headers=headers_pure, timeout=timeout) as res: + if res.status_code != 200: + raise Exception(f"request failed, model status code: {res.status_code}") + # stream parser streamer = ObjectStreamer() sys.stdout = mystdout = StringIO() diff --git a/openaoe/backend/service/service_google.py b/openaoe/backend/service/service_google.py index aeb8662..a10d0f2 100644 --- a/openaoe/backend/service/service_google.py +++ b/openaoe/backend/service/service_google.py @@ -79,14 +79,19 @@ def _construct_request_data(body: GooglePalmChatBody): } } else: - url = f"{api_base}/google/v1beta2/models/{model}:generateMessage?key={api_key}" + url = f"{api_base}/v1beta2/models/{model}:generateMessage?key={api_key}" params = { "key": api_key } - messages = [ - {"content": msg.content, "author": msg.author} - for msg in body.prompt.messages or [] - ] + + messages = [] + last_author = "" + # ignore not answered prompt + for item in body.prompt.messages: + if item.author == last_author: + messages.pop() + messages.append({"content": item.content, "author": item.author}) + last_author = item.author body = { "prompt": { "messages": messages From 3297cfdaf827a03f61980155693d2cbdfd9ae895 Mon Sep 17 00:00:00 2001 From: "PJLAB\\lijiaying" Date: Fri, 2 Feb 2024 17:38:32 +0800 Subject: [PATCH 6/8] 1. function docstring 2. config-template add gemini-pro --- openaoe/backend/config/biz_config.py | 12 ++++--- openaoe/backend/config/config-template.yaml | 9 +++++ openaoe/backend/model/aoe_response.py | 2 ++ openaoe/backend/service/base.py | 40 +++++++++++++++++++-- openaoe/main.py | 4 +-- 5 files changed, 57 insertions(+), 10 deletions(-) diff --git a/openaoe/backend/config/biz_config.py b/openaoe/backend/config/biz_config.py index bac3d83..1360f69 100644 --- a/openaoe/backend/config/biz_config.py +++ b/openaoe/backend/config/biz_config.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import argparse import os import sys @@ -8,7 +10,7 @@ from openaoe.backend.util.log import log logger = log(__name__) -biz_config = None +BIZ_CONFIG = None class BizConfig: @@ -71,14 +73,14 @@ def load_config(config_path) -> BizConfig: logger.error("init configuration failed. 
Exit") sys.exit(-1) - global biz_config - biz_config = BizConfig(**m) + global BIZ_CONFIG + BIZ_CONFIG = BizConfig(**m) logger.info("init configuration successfully.") - return biz_config + return BIZ_CONFIG def get_model_configuration(provider: str, field, model_name: str = None): - models = biz_config.models_map + models = BIZ_CONFIG.models_map if not models: logger.error(f"invalid configuration file") sys.exit(-1) diff --git a/openaoe/backend/config/config-template.yaml b/openaoe/backend/config/config-template.yaml index 5fde96c..681e259 100644 --- a/openaoe/backend/config/config-template.yaml +++ b/openaoe/backend/config/config-template.yaml @@ -48,6 +48,15 @@ models: api: api_base: https://generativelanguage.googleapis.com api_key: + gemini-pro: + provider: google + webui: + avatar: 'https://oss.openmmlab.com/frontend/OpenAOE/google-palm.webp' + isStream: true + background: 'linear-gradient(#b55a5a26 0%, #fa5ab1 100%)' + api: + api_base: https://generativelanguage.googleapis.com + api_key: abab5-chat: provider: minimax webui: diff --git a/openaoe/backend/model/aoe_response.py b/openaoe/backend/model/aoe_response.py index 1371a45..0ba9e97 100644 --- a/openaoe/backend/model/aoe_response.py +++ b/openaoe/backend/model/aoe_response.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + from typing import Optional from pydantic import BaseModel diff --git a/openaoe/backend/service/base.py b/openaoe/backend/service/base.py index 27573db..9bd78cc 100644 --- a/openaoe/backend/service/base.py +++ b/openaoe/backend/service/base.py @@ -1,8 +1,9 @@ -#! /bin/python3 +#!/usr/bin/env python3 import json import sys import traceback from io import StringIO +from typing import Callable import httpx from fastapi.encoders import jsonable_encoder @@ -15,9 +16,24 @@ logger = log(__name__) -async def base_request(provider, url, method: str, headers: dict, body=None, timeout=DEFAULT_TIMEOUT_SECONDS, +async def base_request(provider: str, url: str, method: str, headers: dict, body=None, timeout=DEFAULT_TIMEOUT_SECONDS, params=None, files=None) -> AOEResponse: + """ + common request function for http + Args: + provider: use for log + url: complete url + method: request method + headers: request headers, excluding user-agent, host and ip. + body: json body only + timeout: seconds + params: request params + files: request file + + Returns: + AOEResponse + """ response = AOEResponse() headers_pure = {} @@ -50,10 +66,28 @@ async def base_request(provider, url, method: str, headers: dict, body=None, tim return response -async def base_stream(provider, url, method: str, headers: dict, stream_callback, body=None, +async def base_stream(provider: str, url: str, method: str, headers: dict, stream_callback: Callable, body=None, timeout=DEFAULT_TIMEOUT_SECONDS, params=None, files=None): + """ + common stream request + Args: + stream_callback: + provider: use for log + url: complete url + method: request method + headers: request headers, excluding user-agent, host and ip. 
+ stream_callback: use ObjectStream to stream parse json, this method will be executed while any stream received, + use print to output(we have redirected stdout to response stream) + body: json body only + timeout: seconds + params: request params + files: request file + + Returns: + SSE response with StreamResponse json string + """ headers_pure = { "Content-Type": "application/json" } diff --git a/openaoe/main.py b/openaoe/main.py index 59f4319..f9741ca 100644 --- a/openaoe/main.py +++ b/openaoe/main.py @@ -27,14 +27,14 @@ path = img_out_path() # init configuration content -biz_config = init_config() +BIZ_CONFIG = init_config() app = FastAPI() @app.get("/config/json") async def get_config_json(): - return biz_config.json + return BIZ_CONFIG.json @app.get("/", response_class=HTMLResponse) From b8f82b56d0551724845f10c8371c8e502fa8a3a6 Mon Sep 17 00:00:00 2001 From: "PJLAB\\lijiaying" Date: Fri, 2 Feb 2024 17:48:03 +0800 Subject: [PATCH 7/8] [doc] progress updated --- README.md | 1 + docs/README_zh-CN.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 30abad3..b84ba1f 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ English | [简体中文](docs/README_zh-CN.md) ## Latest Progress 🎉 +- \[February 20024\] Add gemini-pro model - \[January 2024\] refactor the config-template.yaml to control the backend and the frontend settings at the same time, [click](https://github.com/InternLM/OpenAOE/blob/main/docs/tech-report/config-template.md) to find more introduction about the `config-template.yaml` - \[January 2024\] Add internlm2-chat-7b model - \[January 2024\] Released version v0.0.1, officially open source! diff --git a/docs/README_zh-CN.md b/docs/README_zh-CN.md index 3170f8d..c07997a 100644 --- a/docs/README_zh-CN.md +++ b/docs/README_zh-CN.md @@ -13,6 +13,7 @@ ## 最新进展 🎉 +- \[2024/02\] 添加gemini-pro模型 - \[2024/01\] 重构了config-template.yaml,可以同时配置前后端的设置 - \[2024/01\] 添加 internlm2-chat-7b 模型 - \[2024/01\] 发布v0.0.1版本,正式开源。 From 88ff6a02e9209091cfa57a9e989381c00ac4eba9 Mon Sep 17 00:00:00 2001 From: "PJLAB\\lijiaying" Date: Fri, 2 Feb 2024 17:52:04 +0800 Subject: [PATCH 8/8] [doc] progress updated --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b84ba1f..52de2b4 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ English | [简体中文](docs/README_zh-CN.md) ## Latest Progress 🎉 -- \[February 20024\] Add gemini-pro model +- \[February 2024\] Add gemini-pro model - \[January 2024\] refactor the config-template.yaml to control the backend and the frontend settings at the same time, [click](https://github.com/InternLM/OpenAOE/blob/main/docs/tech-report/config-template.md) to find more introduction about the `config-template.yaml` - \[January 2024\] Add internlm2-chat-7b model - \[January 2024\] Released version v0.0.1, officially open source!
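
A note on the streaming design introduced in PATCH 1/8: Gemini's `streamGenerateContent` endpoint returns one top-level JSON array whose elements trickle in over the connection, so the body cannot be handed to `json.loads` until the stream ends. That is why the series pins `jsonstreamer==1.3.8` and has `base_stream` feed raw chunks into an `ObjectStreamer`, with `_catch_all` printing each extracted delta into a redirected `stdout`. The sketch below isolates that parsing idea; as an assumption for clarity it collects deltas into a local list instead of redirecting `stdout`, and the chunks are fabricated examples, not captured API output.

```python
# Standalone sketch of the incremental-parse idea behind base_stream and
# _catch_all. Assumes jsonstreamer==1.3.8, as pinned in requirements.txt.
from jsonstreamer import ObjectStreamer

collected = []


def catch_all(event_name, *args):
    # ObjectStreamer fires ELEMENT_EVENT once per completed element of the
    # top-level JSON array that streamGenerateContent returns.
    if event_name != ObjectStreamer.ELEMENT_EVENT:
        return
    for item in args:
        try:
            collected.append(item["candidates"][0]["content"]["parts"][0]["text"])
        except (KeyError, IndexError, TypeError):
            pass  # element without a text candidate


streamer = ObjectStreamer()
streamer.add_catch_all_listener(catch_all)

# Illustrative network chunks: the JSON is cut mid-token, and the parser
# still recovers each array element as soon as it closes.
for chunk in (
    '[{"candidates": [{"content": {"parts": [{"te',
    'xt": "Hel"}]}}]},',
    ' {"candidates": [{"content": {"parts": [{"text": "lo"}]}}]}]',
):
    streamer.consume(chunk)

print("".join(collected))  # Hello
```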
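PATCH 3/8 and PATCH 4/8 together define the wire contract between the backend and the frontend: every SSE event body is a `StreamResponse` serialized to JSON, and `chat.ts` accepts a delta only when `data.success.toString() === 'true'`. Below is a minimal sketch of the producer side, assuming only the `pydantic`/`fastapi` stack already in the repo; the `frame`/`error_frame` helpers are illustrative names, not functions from the repository.

```python
# Sketch of the StreamResponse frame format from patch 3. Each string
# returned here is what one SSE event carries to the frontend.
import json
from typing import Optional

from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel


class StreamResponse(BaseModel):
    success: Optional[bool] = True
    msg: Optional[str] = ""


def frame(delta: str) -> str:
    # normal delta frame, parsed by the success branch in chat.ts
    return json.dumps(jsonable_encoder(StreamResponse(msg=delta)))


def error_frame(reason: str) -> str:
    # failure frame yielded from base_stream's except block
    return json.dumps(jsonable_encoder(StreamResponse(success=False, msg=reason)))


print(frame("Hel"))                   # {"success": true, "msg": "Hel"}
print(error_frame("request failed"))  # {"success": false, "msg": "request failed"}
```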
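PATCH 5/8's PaLM fix relies on the `generateMessage` endpoint expecting strictly alternating authors in `prompt.messages`: when two consecutive messages share an author (a prompt the model never answered), the earlier one is popped before the request body is built. Here is a pure-function sketch of that cleanup, with a hypothetical `Message` dataclass standing in for the pydantic prompt model.

```python
# Sketch of the author-deduplication logic added to _construct_request_data
# in patch 5. Message is a stand-in type, not the repo's pydantic model.
from dataclasses import dataclass


@dataclass
class Message:
    author: str
    content: str


def dedupe_authors(messages: list[Message]) -> list[dict]:
    cleaned, last_author = [], ""
    for item in messages:
        if item.author == last_author:
            cleaned.pop()  # drop the unanswered predecessor
        cleaned.append({"content": item.content, "author": item.author})
        last_author = item.author
    return cleaned


# "0" = user, "1" = model; the first user turn got no reply and is dropped,
# leaving a strictly alternating history.
history = [Message("0", "hi"), Message("0", "are you there?"), Message("1", "yes")]
assert dedupe_authors(history) == [
    {"content": "are you there?", "author": "0"},
    {"content": "yes", "author": "1"},
]
```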