Skip to content
This repository has been archived by the owner on Aug 30, 2024. It is now read-only.

Commit

Permalink
add whisper model check (#189)
Browse files Browse the repository at this point in the history
  • Loading branch information
intellinjun authored Mar 22, 2024
1 parent ed6e8ad commit 66bcc8b
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 4 deletions.
13 changes: 10 additions & 3 deletions neural_speed/application/quant_whisper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@
#include "models/model_utils/quant_utils.h"
#include "common.h"

// F_OK is provided by <unistd.h> on POSIX systems; define it only when the
// platform headers did not (e.g. some Windows toolchains), to avoid a
// macro-redefinition clash with the system definition.
#ifndef F_OK
#define F_OK 0
#endif

// Returns true if a file at path `name` exists (checked via access(2) with
// F_OK, i.e. existence only — no read/write permission check).
inline bool exists_model(const std::string& name) { return (access(name.c_str(), F_OK) != -1); }
int main(int argc, char** argv) {
quant_params q_params;
if (quant_params_parse(argc, argv, q_params) == false) {
Expand Down Expand Up @@ -52,9 +55,13 @@ int main(int argc, char** argv) {
// load the model
{
const int64_t t_start_us = ne_time_us();

if (!whisper_model_quantize(fname_inp, fname_out, ne_ftype(ftype))) {
fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
if (exists_model(fname_inp)) {
if (!whisper_model_quantize(fname_inp, fname_out, ne_ftype(ftype))) {
fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
return 1;
}
} else {
fprintf(stderr, "%s: model is not exist '%s'\n", __func__, fname_inp.c_str());
return 1;
}

Expand Down
2 changes: 1 addition & 1 deletion neural_speed/models/gemma/gemma_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ void Gemma::init(const char* path_model, model_context* ctx, int n_gpu_layer_, b
n_head = hparams.n_head;
n_expert = hparams.n_experts;
n_expert_used = hparams.n_experts_used;
scratch = gemma_mem_req(n_layer, lctx.scratch_size_ratio);
scratch = gemma_mem_req(n_layer, lctx.scratch_size_ratio * 1.5);
model.scratchs = scratch;
}

Expand Down

0 comments on commit 66bcc8b

Please sign in to comment.