From 51499a64fde3c58b1fe8e993b064c1fa4346db7c Mon Sep 17 00:00:00 2001
From: InNoobWeTrust
Date: Tue, 27 Feb 2024 16:29:36 +0700
Subject: [PATCH] fix(nitro-node): use exception instead of returning an error inside return type

---
 nitro-node/src/nitro.ts       | 66 +++++++++++++++++------------------
 nitro-node/src/types/index.ts |  1 -
 2 files changed, 33 insertions(+), 34 deletions(-)

diff --git a/nitro-node/src/nitro.ts b/nitro-node/src/nitro.ts
index 2c61954cc..7e4736611 100644
--- a/nitro-node/src/nitro.ts
+++ b/nitro-node/src/nitro.ts
@@ -122,8 +122,11 @@ async function initialize(): Promise<void> {
  * @param wrapper - The model wrapper.
  * @returns A Promise that resolves when the subprocess is terminated successfully, or rejects with an error message if the subprocess fails to terminate.
  */
-function stopModel(): Promise<NitroModelOperationResponse> {
-  return killSubprocess();
+async function stopModel(): Promise<NitroModelOperationResponse> {
+  await killSubprocess();
+  // Unload settings
+  currentSettings = undefined;
+  return {};
 }
 
 /**
@@ -216,21 +219,15 @@ async function runNitroAndLoadModel(
     if (process.platform === "win32") {
       return await new Promise((resolve) => setTimeout(() => resolve({}), 500));
     }
-    const spawnResult = await spawnNitroProcess(runMode);
-    if (spawnResult.error) {
-      return spawnResult;
-    }
+    await spawnNitroProcess(runMode);
     // TODO: Use this response?
     const _loadModelResponse = await loadLLMModel(currentSettings!);
-    const validationResult = await validateModelStatus();
-    if (validationResult.error) {
-      return validationResult;
-    }
-    return {};
+    await validateModelStatus();
+    return { modelFile: currentSettings?.llama_model_path };
   } catch (err: any) {
     // TODO: Broadcast error so app could display proper error message
     log(`[NITRO]::Error: ${err}`);
-    return { error: err };
+    throw err;
   }
 }
 
@@ -320,7 +317,7 @@ async function chatCompletion(
  * If the model is loaded successfully, the object is empty.
  * If the model is not loaded successfully, the object contains an error message.
  */
-async function validateModelStatus(): Promise<NitroModelOperationResponse> {
+async function validateModelStatus(): Promise<void> {
   // Send a GET request to the validation URL.
   // Retry the request up to 3 times if it fails, with a delay of 500 milliseconds between retries.
   const response = await fetchRetry(NITRO_HTTP_VALIDATE_MODEL_URL, {
@@ -342,44 +339,47 @@
     // If the model is loaded, return an empty object.
     // Otherwise, return an object with an error message.
     if (body.model_loaded) {
-      return {};
+      return;
     }
   }
 
-  return { error: "Validate model status failed" };
+  throw Error("Validate model status failed");
 }
 
 /**
  * Terminates the Nitro subprocess.
  * @returns A Promise that resolves when the subprocess is terminated successfully, or rejects with an error message if the subprocess fails to terminate.
  */
-async function killSubprocess(): Promise<NitroModelOperationResponse> {
+async function killSubprocess(): Promise<void> {
   const controller = new AbortController();
   setTimeout(() => controller.abort(), 5000);
   log(`[NITRO]::Debug: Request to kill Nitro`);
-  try {
-    // FIXME: should use this response?
-    const _response = await fetch(NITRO_HTTP_KILL_URL, {
-      method: "DELETE",
-      signal: controller.signal,
-    });
-    subprocess?.kill();
-    subprocess = undefined;
-    await tcpPortUsed.waitUntilFree(PORT, 300, 5000);
-    log(`[NITRO]::Debug: Nitro process is terminated`);
-    return {};
-  } catch (err) {
-    return { error: err };
+  // Request self-kill if server is running
+  if (await tcpPortUsed.check(PORT)) {
+    try {
+      // FIXME: should use this response?
+      const response = await fetch(NITRO_HTTP_KILL_URL, {
+        method: "DELETE",
+        signal: controller.signal,
+      });
+    } catch (err: any) {
+      // FIXME: Nitro exits without response so fetching will fail
+      // Intentionally ignore the error
+    }
   }
+  // Force kill subprocess
+  subprocess?.kill();
+  subprocess = undefined;
+  await tcpPortUsed.waitUntilFree(PORT, 300, 5000);
+  log(`[NITRO]::Debug: Nitro process is terminated`);
+  return;
 }
 
 /**
  * Spawns a Nitro subprocess.
  * @returns A promise that resolves when the Nitro subprocess is started.
  */
-function spawnNitroProcess(
-  runMode?: "cpu" | "gpu",
-): Promise<NitroModelOperationResponse> {
+function spawnNitroProcess(runMode?: "cpu" | "gpu"): Promise<void> {
   log(`[NITRO]::Debug: Spawning Nitro subprocess...`);
 
   return new Promise(async (resolve, reject) => {
@@ -419,7 +419,7 @@ function spawnNitroProcess(
 
     tcpPortUsed.waitUntilUsed(PORT, 300, 5000).then(() => {
       log(`[NITRO]::Debug: Nitro is ready`);
-      resolve({});
+      resolve();
     });
   });
 }
diff --git a/nitro-node/src/types/index.ts b/nitro-node/src/types/index.ts
index e12ebf489..2250f391c 100644
--- a/nitro-node/src/types/index.ts
+++ b/nitro-node/src/types/index.ts
@@ -7,7 +7,6 @@ import stream from "node:stream";
  * @property error - An error message if the model fails to load.
  */
 export interface NitroModelOperationResponse {
-  error?: any;
   modelFile?: string;
 }
 
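Note for call sites (illustrative, not part of this patch): the changed functions now
report failure by throwing instead of returning an object with an "error" field, so
callers should replace "result.error" checks with try/catch. A minimal sketch, assuming
validateModelStatus and stopModel are exported from nitro.ts; the import path and the
helper name are assumptions:

    // Hypothetical caller, shown only to illustrate the new error contract.
    import { stopModel, validateModelStatus } from "./nitro";

    async function checkThenShutdown(): Promise<void> {
      try {
        // Resolves (void) when the model is loaded, throws otherwise.
        await validateModelStatus();
      } catch (err: any) {
        // Previously callers inspected the error field of the returned object.
        console.error(`[APP]::Error: ${err}`);
      } finally {
        // stopModel() now also clears currentSettings after killing the subprocess.
        await stopModel();
      }
    }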