forked from AudioLLMs/AudioBench
-
Notifications
You must be signed in to change notification settings - Fork 0
/
model.py
66 lines (49 loc) · 2.4 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#!/usr/bin/env python
# -*- coding:utf-8 -*-
###
# Created Date: Friday, November 10th 2023, 12:25:19 pm
# Author: Bin Wang
# -----
# Copyright (c) Bin Wang @ [email protected]
#
# -----
# HISTORY:
# Date&Time By Comments
# ---------- --- ----------------------------------------------------------
###
# add parent directory to sys.path
import sys
sys.path.append('.')
import logging
import torch
from model_src.whisper_large_v3_with_llama_3_8b_instruct import whisper_large_v3_with_llama_3_8b_instruct_model_loader, whisper_large_v3_with_llama_3_8b_instruct_model_generation
# from model_src.salmonn_7b import salmonn_7b_model_loader, salmonn_7b_model_generation
from model_src.llama3_1_s_whisperspeech import llama3_1_s_model_loader, llama3_1_s_model_generation
# = = = = = = = = = = = Logging Setup = = = = = = = = = = = = =
# Module-level logger for this file; basicConfig installs a root handler
# with a timestamped format (no-op if a handler is already configured).
logger = logging.getLogger(__name__)
_LOG_FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(level=logging.INFO,
                    format=_LOG_FORMAT,
                    datefmt="%m/%d/%Y %H:%M:%S")
class Model(object):
    """Dispatcher that loads one of several audio-LLM backends by name and
    routes generation requests to the matching backend implementation."""

    def __init__(self, model_name_or_path):
        # Name (or path) used by load_model/generate to pick the backend.
        self.model_name = model_name_or_path
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.load_model()
        logger.info("Loaded model: {}".format(self.model_name))
        logger.info("= = "*20)

    def _is_llama3_s(self):
        # True when the model name refers to the llama3-s / ichigo family.
        name = self.model_name.lower()
        return "llama3-s" in name or "ichigo" in name

    def load_model(self):
        """Call the backend-specific loader for ``self.model_name``.

        Raises:
            NotImplementedError: if the model name matches no known backend.
        """
        if self.model_name == "whisper_large_v3_with_llama_3_8b_instruct":
            whisper_large_v3_with_llama_3_8b_instruct_model_loader(self)
        # elif self.model_name == "salmonn_7b": salmonn_7b_model_loader(self)
        # BUGFIX: the original condition `"llama3-s" or "ichigo" in name` was
        # always truthy (the non-empty literal short-circuits the `or`), so
        # every unknown model silently fell into this branch instead of
        # raising NotImplementedError below.
        elif self._is_llama3_s():
            llama3_1_s_model_loader(self)
        else:
            raise NotImplementedError("Model {} not implemented yet".format(self.model_name))

    def generate(self, input):
        """Run gradient-free inference on ``input`` with the loaded backend.

        Raises:
            NotImplementedError: if the model name matches no known backend.
        """
        with torch.no_grad():
            if self.model_name == "whisper_large_v3_with_llama_3_8b_instruct":
                return whisper_large_v3_with_llama_3_8b_instruct_model_generation(self, input)
            # elif self.model_name == "salmonn_7b": return salmonn_7b_model_generation(self, input)
            # BUGFIX: same always-true `or` condition as in load_model.
            elif self._is_llama3_s():
                return llama3_1_s_model_generation(self, input)
            else:
                raise NotImplementedError("Model {} not implemented yet".format(self.model_name))