fix: GPT3 token counter bug
TBXark committed Oct 7, 2023
1 parent 748eef6 commit 004f788
Showing 12 changed files with 96 additions and 98 deletions.
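
In short: the GPT-3 token counter used to fetch its encoder and BPE vocabulary from a repository URL hard-coded in src/gpt3.js (the tbxark-archive fork, which presumably no longer resolved and caused the bug). This commit makes that address configurable through the new ENV.GPT3_TOKENS_COUNT_REPO setting (defaulting to the tbxark-arc fork), moves the encoder to src/vendors/gpt3.js with the resource loader injected as a parameter, and points the gpt3TokenTest route at tokensCounter() so it falls back to a plain character count when GPT3_TOKENS_COUNT is disabled.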
2 changes: 1 addition & 1 deletion dist/buildinfo.json
@@ -1 +1 @@
{"sha": "682c32a", "timestamp": 1696664248}
{"sha": "748eef6", "timestamp": 1696664893}
65 changes: 33 additions & 32 deletions dist/index.js
@@ -26,6 +26,8 @@ var ENV = {
MAX_TOKEN_LENGTH: 2048,
// Use GPT-3 token counting
GPT3_TOKENS_COUNT: false,
// GPT-3 token counter resource URL
GPT3_TOKENS_COUNT_REPO: "https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master",
// Global default system init message
SYSTEM_INIT_MESSAGE: "You are a helpful assistant",
// Global default system init message role
@@ -39,9 +41,9 @@ var ENV = {
// Branch used to check for updates
UPDATE_BRANCH: "master",
// Current version
BUILD_TIMESTAMP: 1696664248,
BUILD_TIMESTAMP: 1696664893,
// Current version commit id
BUILD_VERSION: "682c32a",
BUILD_VERSION: "748eef6",
I18N: null,
LANGUAGE: "zh-cn",
// Use stream mode
@@ -554,7 +556,7 @@ async function requestCompletionsFromOpenAI(message, history, context, onStream)
const timeout = 1e3 * 60 * 5;
setTimeout(() => controller.abort(), timeout);
let url = `${ENV.OPENAI_API_DOMAIN}/v1/chat/completions`;
let header = {
const header = {
"Content-Type": "application/json",
"Authorization": `Bearer ${key}`
};
@@ -660,33 +662,10 @@ async function updateBotUsage(usage, context) {
await DATABASE.put(context.SHARE_CONTEXT.usageKey, JSON.stringify(dbValue));
}

// src/gpt3.js
async function resourceLoader(key, url) {
try {
const raw = await DATABASE.get(key);
if (raw && raw !== "") {
return raw;
}
} catch (e) {
console.error(e);
}
try {
const bpe = await fetch(url, {
headers: {
"User-Agent": CONST.USER_AGENT
}
}).then((x) => x.text());
await DATABASE.put(key, bpe);
return bpe;
} catch (e) {
console.error(e);
}
return null;
}
async function gpt3TokensCounter() {
const repo = "https://raw.githubusercontent.com/tbxark-archive/GPT-3-Encoder/master";
const encoder = await resourceLoader("encoder_raw_file", `${repo}/encoder.json`).then((x) => JSON.parse(x));
const bpe_file = await resourceLoader("bpe_raw_file", `${repo}/vocab.bpe`);
// src/vendors/gpt3.js
async function gpt3TokensCounter(repo, loader) {
const encoder = await loader("encoder_raw_file", `${repo}/encoder.json`).then((x) => JSON.parse(x));
const bpe_file = await loader("bpe_raw_file", `${repo}/vocab.bpe`);
const range = (x, y) => {
const res = Array.from(Array(y).keys()).slice(x);
return res;
@@ -919,7 +898,29 @@ async function tokensCounter() {
let counter = (text) => Array.from(text).length;
try {
if (ENV.GPT3_TOKENS_COUNT) {
counter = await gpt3TokensCounter();
const loader = async (key, url) => {
try {
const raw = await DATABASE.get(key);
if (raw && raw !== "") {
return raw;
}
} catch (e) {
console.error(e);
}
try {
const bpe = await fetch(url, {
headers: {
"User-Agent": CONST.USER_AGENT
}
}).then((x) => x.text());
await DATABASE.put(key, bpe);
return bpe;
} catch (e) {
console.error(e);
}
return null;
};
counter = await gpt3TokensCounter(ENV.GPT3_TOKENS_COUNT_REPO, loader);
}
} catch (e) {
console.error(e);
@@ -2259,7 +2260,7 @@ async function defaultIndexAction() {
}
async function gpt3TokenTest(request) {
const text = new URL(request.url).searchParams.get("text") || "Hello World";
const counter = await gpt3TokensCounter();
const counter = await tokensCounter();
const HTML = renderHTML(`
<h1>ChatGPT-Telegram-Workers</h1>
<br/>
2 changes: 1 addition & 1 deletion dist/timestamp
@@ -1 +1 @@
1696664248
1696664893
12 changes: 5 additions & 7 deletions src/chat.js
@@ -1,15 +1,14 @@
import {
deleteMessageFromTelegramWithContext,
sendChatActionToTelegramWithContext,
sendMessageToTelegramWithContext
sendMessageToTelegramWithContext,
} from './telegram.js';
import {DATABASE, ENV} from './env.js';
// eslint-disable-next-line no-unused-vars
import {Context} from './context.js';
import {requestCompletionsFromOpenAI} from "./openai.js";
import {tokensCounter} from "./utils.js";
import {isWorkersAIEnable, requestCompletionsFromWorkersAI} from "./workers-ai.js";

import {requestCompletionsFromOpenAI} from './openai.js';
import {tokensCounter} from './utils.js';
import {isWorkersAIEnable, requestCompletionsFromWorkersAI} from './workers-ai.js';


/**
@@ -106,7 +105,6 @@ async function loadHistory(key, context) {
}



/**
*
* @param {string} text
@@ -171,7 +169,7 @@ export async function chatWithLLM(text, context, modifier) {

let llm = requestCompletionsFromOpenAI;
if (isWorkersAIEnable(context)) {
llm = requestCompletionsFromWorkersAI
llm = requestCompletionsFromWorkersAI;
}

const answer = await requestCompletionsFromLLM(text, context, llm, modifier, onStream);
2 changes: 1 addition & 1 deletion src/context.js
@@ -196,7 +196,7 @@ export class Context {
*/
openAIKeyFromContext() {
if (ENV.AZURE_COMPLETIONS_API) {
return ENV.AZURE_API_KEY
return ENV.AZURE_API_KEY;
}
if (this.USER_CONFIG.OPENAI_API_KEY) {
return this.USER_CONFIG.OPENAI_API_KEY;
7 changes: 5 additions & 2 deletions src/env.js
@@ -13,6 +13,7 @@
* @property {number} MAX_HISTORY_LENGTH
* @property {number} MAX_TOKEN_LENGTH
* @property {boolean} GPT3_TOKENS_COUNT
* @property {string} GPT3_TOKENS_COUNT_REPO
* @property {string} SYSTEM_INIT_MESSAGE
* @property {string} SYSTEM_INIT_MESSAGE_ROLE
* @property {boolean} ENABLE_USAGE_STATISTICS
@@ -67,6 +68,8 @@ export const ENV = {
MAX_TOKEN_LENGTH: 2048,
// Use GPT-3 token counting
GPT3_TOKENS_COUNT: false,
// GPT-3 token counter resource URL
GPT3_TOKENS_COUNT_REPO: 'https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master',
// Global default system init message
SYSTEM_INIT_MESSAGE: 'You are a helpful assistant',
// Global default system init message role
@@ -106,11 +109,11 @@ export const ENV = {

// Azure API Key
AZURE_API_KEY: null,
// Azure Completions API
// Azure Completions API
AZURE_COMPLETIONS_API: null,

// Workers AI model
WORKERS_AI_MODEL: null
WORKERS_AI_MODEL: null,

};

6 changes: 3 additions & 3 deletions src/message.js
@@ -4,8 +4,8 @@ import {sendMessageToTelegramWithContext} from './telegram.js';
import {handleCommandMessage} from './command.js';
import {errorToString} from './utils.js';
import {chatWithLLM} from './chat.js';
import {isOpenAIEnable} from "./openai.js";
import {isWorkersAIEnable} from "./workers-ai.js";
import {isOpenAIEnable} from './openai.js';
import {isWorkersAIEnable} from './workers-ai.js';
// import {TelegramMessage, TelegramWebhookRequest} from './type.d.ts';


@@ -79,7 +79,7 @@ async function msgIgnoreOldMessage(message, context) {
* @return {Promise<Response>}
*/
async function msgCheckEnvIsReady(message, context) {
const llmEnable = isOpenAIEnable(context) || isWorkersAIEnable(context)
const llmEnable = isOpenAIEnable(context) || isWorkersAIEnable(context);
if (!llmEnable) {
return sendMessageToTelegramWithContext(context)('LLM Not Set');
}
10 changes: 5 additions & 5 deletions src/openai.js
@@ -63,15 +63,15 @@ export async function requestCompletionsFromOpenAI(message, history, context, on
setTimeout(() => controller.abort(), timeout);

let url = `${ENV.OPENAI_API_DOMAIN}/v1/chat/completions`;
let header = {
const header = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${key}`,
}
};
if (ENV.AZURE_COMPLETIONS_API) {
url = ENV.AZURE_COMPLETIONS_API;
header['api-key'] = key
delete header['Authorization']
delete body.model
header['api-key'] = key;
delete header['Authorization'];
delete body.model;
}
const resp = await fetch(url, {
method: 'POST',
5 changes: 2 additions & 3 deletions src/router.js
@@ -2,8 +2,7 @@ import {handleMessage} from './message.js';
import {API_GUARD, DATABASE, ENV} from './env.js';
import {bindCommandForTelegram, commandsDocument} from './command.js';
import {bindTelegramWebHook, getBot} from './telegram.js';
import {errorToString, historyPassword, makeResponse200, renderHTML} from './utils.js';
import {gpt3TokensCounter} from './gpt3.js';
import {errorToString, historyPassword, makeResponse200, renderHTML, tokensCounter} from './utils.js';


const helpLink = 'https://github.com/TBXark/ChatGPT-Telegram-Workers/blob/master/doc/DEPLOY.md';
@@ -160,7 +159,7 @@
*/
async function gpt3TokenTest(request) {
const text = new URL(request.url).searchParams.get('text') || 'Hello World';
const counter = await gpt3TokensCounter();
const counter = await tokensCounter();
const HTML = renderHTML(`
<h1>ChatGPT-Telegram-Workers</h1>
<br/>
26 changes: 24 additions & 2 deletions src/utils.js
@@ -1,5 +1,5 @@
import {CONST, DATABASE, ENV} from './env.js';
import {gpt3TokensCounter} from './gpt3.js';
import {gpt3TokensCounter} from './vendors/gpt3.js';

/**
* @param {number} length
@@ -127,7 +127,29 @@ export async function tokensCounter() {
let counter = (text) => Array.from(text).length;
try {
if (ENV.GPT3_TOKENS_COUNT) {
counter = await gpt3TokensCounter();
const loader = async (key, url) => {
try {
const raw = await DATABASE.get(key);
if (raw && raw !== '') {
return raw;
}
} catch (e) {
console.error(e);
}
try {
const bpe = await fetch(url, {
headers: {
'User-Agent': CONST.USER_AGENT,
},
}).then((x) => x.text());
await DATABASE.put(key, bpe);
return bpe;
} catch (e) {
console.error(e);
}
return null;
};
counter = await gpt3TokensCounter(ENV.GPT3_TOKENS_COUNT_REPO, loader);
}
} catch (e) {
console.error(e);
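
Worth noting as a design choice: moving the KV-backed resourceLoader out of the vendored module and passing it in from tokensCounter() removes gpt3.js's imports of DATABASE and CONST from env.js, so src/vendors/gpt3.js can now track the upstream GPT-3-Encoder code without project-specific dependencies.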
33 changes: 4 additions & 29 deletions src/gpt3.js → src/vendors/gpt3.js
@@ -1,35 +1,10 @@
/* eslint-disable camelcase, require-jsdoc */
/* eslint-disable */
// https://github.com/latitudegames/GPT-3-Encoder

import {CONST, DATABASE} from './env.js';

async function resourceLoader(key, url) {
try {
const raw = await DATABASE.get(key);
if (raw && raw !== '') {
return raw;
}
} catch (e) {
console.error(e);
}
try {
const bpe = await fetch(url, {
headers: {
'User-Agent': CONST.USER_AGENT,
},
}).then((x) => x.text());
await DATABASE.put(key, bpe);
return bpe;
} catch (e) {
console.error(e);
}
return null;
}

export async function gpt3TokensCounter() {
const repo = 'https://raw.githubusercontent.com/tbxark-archive/GPT-3-Encoder/master';
const encoder = await resourceLoader('encoder_raw_file', `${repo}/encoder.json`).then((x) => JSON.parse(x));
const bpe_file = await resourceLoader('bpe_raw_file', `${repo}/vocab.bpe`);
export async function gpt3TokensCounter(repo, loader) {
const encoder = await loader('encoder_raw_file', `${repo}/encoder.json`).then((x) => JSON.parse(x));
const bpe_file = await loader('bpe_raw_file', `${repo}/vocab.bpe`);

const range = (x, y) => {
const res = Array.from(Array(y).keys()).slice(x);
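
With the repo address and loader injected, the counter can be exercised on its own. A minimal usage sketch, not part of the diff, assuming only that the returned counter maps a string to its token count (as the gpt3TokenTest route suggests); the simplified loader below skips the KV caching that tokensCounter() layers on top:

// Hypothetical standalone usage of the refactored counter (module with top-level await).
const repo = 'https://raw.githubusercontent.com/tbxark-arc/GPT-3-Encoder/master';
// Simplified loader: fetch the resource as text, with no KV cache.
const loader = (key, url) => fetch(url).then((x) => x.text());
const counter = await gpt3TokensCounter(repo, loader);
console.log(counter('Hello World')); // number of GPT-3 tokens in the input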
24 changes: 12 additions & 12 deletions src/workers-ai.js
@@ -1,13 +1,14 @@
import {ENV, AI_LLM} from "./env.js";
import {Ai} from "./vendors/cloudflare-ai.js";
import {ENV, AI_LLM} from './env.js';
import {Ai} from './vendors/cloudflare-ai.js';


/**
* @param {Context} context
* @return {boolean}
*/
export function isWorkersAIEnable(context) {
return AI_LLM !== null;
// return ENV.WORKERS_AI_MODEL !== null;
return AI_LLM !== null;
// return ENV.WORKERS_AI_MODEL !== null;
}


@@ -21,12 +22,11 @@ export function isWorkersAIEnable(context) {
* @return {Promise<string>}
*/
export async function requestCompletionsFromWorkersAI(message, history, context, onStream) {

const ai = new Ai(AI_LLM);
const model = ENV.WORKERS_AI_MODEL || '@cf/meta/llama-2-7b-chat-int8'
const request = {
messages: [...history || [], { role: "user", content: message }]
};
const response = await ai.run(model, request);
return response.response;
const ai = new Ai(AI_LLM);
const model = ENV.WORKERS_AI_MODEL || '@cf/meta/llama-2-7b-chat-int8';
const request = {
messages: [...history || [], {role: 'user', content: message}],
};
const response = await ai.run(model, request);
return response.response;
}
