add mobile model
zoollcar committed Nov 14, 2024
1 parent 09b06d8 commit e82460b
Showing 3 changed files with 14 additions and 12 deletions.
6 changes: 3 additions & 3 deletions src/App.tsx
@@ -246,16 +246,16 @@ function App() {
if (chat instanceof LLMChatWebLLM) {
if (chat.getInitStatus() === "not start") {
const answer = confirm(
"webLLM need to load at every time, first time may need some time to download model(~1.5G). load now?"
"webLLM need to load every time, first time need some time to download model(~1.5G(PC)/~800MB(phone)). load now?"
);
if (answer) {
const timer = setInterval(() => {
setSubtitle(chat.initProgress || "webLLM loading");
}, 1000);
chat.init().then(() => {
alert("webLLM loaded");
clearInterval(timer);
setSubtitle("");
if (chat.getInitStatus() === "done") setSubtitle("webLLM loaded");
else setSubtitle("webLLM error");
});
}
return;
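For reference, the pattern this hunk settles on is: poll the engine's progress string on a timer while init() runs, then report success or failure from getInitStatus() instead of assuming success. A stripped-down sketch of that flow, assuming only the chat interface visible above (setSubtitle stands in for any UI setter):

function loadWithProgress(
  chat: { init(): Promise<void>; getInitStatus(): string; initProgress?: string },
  setSubtitle: (text: string) => void
) {
  // Show whatever progress string the engine exposes while loading.
  const timer = setInterval(() => {
    setSubtitle(chat.initProgress || "webLLM loading");
  }, 1000);
  chat.init().then(() => {
    clearInterval(timer);
    // Report the final status rather than assuming the load succeeded.
    setSubtitle(chat.getInitStatus() === "done" ? "webLLM loaded" : "webLLM error");
  });
}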
19 changes: 11 additions & 8 deletions src/models/llm/LLMChatWebLLM.ts
@@ -24,21 +24,24 @@ export default class LLMChatWebLLM {

async init() {
// This is an asynchronous call and can take a long time to finish
// Available models are listed at https://github.com/mlc-ai/web-llm/blob/main/src/config.ts
this.initStatus = "working";
const maxStorageBufferBindingSize =
await this.client.getMaxStorageBufferBindingSize();
console.log(maxStorageBufferBindingSize);
let selectedModel = "RedPajama-INCITE-Chat-3B-v1-q4f16_1-1k";
if (maxStorageBufferBindingSize >= 2147480000) {
// ~2G
selectedModel = "Llama-3.2-3B-Instruct-q4f16_1-MLC";
}
try {
const maxStorageBufferBindingSize =
await this.client.getMaxStorageBufferBindingSize();
// alert(maxStorageBufferBindingSize);
let selectedModel = "SmolLM2-135M-Instruct-q0f32-MLC";
if (maxStorageBufferBindingSize >= 1073741800) {
// ~1G
selectedModel = "Llama-3.2-3B-Instruct-q4f16_1-MLC";
}
console.log(`webLLM: select ${selectedModel}`);
await this.client.reload(selectedModel);
this.initStatus = "done";
} catch (e) {
this.initStatus = "error";
this.initProgress = (e as Error).message;
alert("Error: " + (e as Error).message);
}
}

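For context, the selection rule introduced here keys off the GPU's maximum storage-buffer binding size: below roughly 1 GiB (typical of phones) it falls back to the small SmolLM2 model, otherwise it loads the larger Llama model. A minimal standalone sketch of the same idea, querying WebGPU limits directly rather than through this.client (pickWebLLMModel is a hypothetical helper, and the navigator cast assumes WebGPU type definitions are not installed):

async function pickWebLLMModel(): Promise<string> {
  // Ask the WebGPU adapter for its storage-buffer limit (0 if WebGPU is unavailable).
  const gpu = (navigator as any).gpu;
  const adapter = gpu ? await gpu.requestAdapter() : null;
  const maxBinding: number = adapter?.limits?.maxStorageBufferBindingSize ?? 0;
  // Same ~1 GiB threshold as the diff above: small model for phones, larger one for PCs.
  return maxBinding >= 1073741800
    ? "Llama-3.2-3B-Instruct-q4f16_1-MLC"
    : "SmolLM2-135M-Instruct-q0f32-MLC";
}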
1 change: 0 additions & 1 deletion src/models/tts/textToSpeech.ts
@@ -1,6 +1,5 @@
import { getBackendEndpoint } from "../appstore";

// const vitsWebWorker = new Worker("./vitsWeb");
const vitsWebWorker = new Worker(new URL("./vitsWeb.ts", import.meta.url), {
type: "module", // Specify that the worker is a module
});
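As a usage note, the new URL("./vitsWeb.ts", import.meta.url) form is what lets bundlers such as Vite or webpack 5 statically discover and bundle the worker, which is why the commented-out plain-string variant above could be deleted. A minimal, hypothetical round trip with such a module worker (the message shapes are assumptions; the real vitsWeb.ts protocol may differ):

const ttsWorker = new Worker(new URL("./vitsWeb.ts", import.meta.url), {
  type: "module",
});
// Listen for whatever the worker posts back (payload shape is illustrative).
ttsWorker.onmessage = (event: MessageEvent) => {
  console.log("tts worker replied:", event.data);
};
// Send a synthesis request (field name "text" is an assumption).
ttsWorker.postMessage({ text: "hello" });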
