Commit

Merge branch 'dev_wyan' into 'dev'
update ai demo & libs

See merge request maix_sw/k230_canmv!256
wyWangYan01 committed Jun 20, 2024
2 parents 22b50e6 + 1786e8e commit 0127f6c
Showing 13 changed files with 345 additions and 32 deletions.
7 changes: 4 additions & 3 deletions fs_resource/libs/AIBase.py
@@ -41,7 +41,7 @@ def get_kmodel_outputs_num(self):

     def preprocess(self,input_np):
         with ScopedTiming("preprocess",self.debug_mode > 0):
-            return self.ai2d.run(input_np)
+            return [self.ai2d.run(input_np)]

     def inference(self,tensors):
         with ScopedTiming("kpu run & get output",self.debug_mode > 0):
@@ -67,15 +67,16 @@ def postprocess(self,results):
     def run(self,input_np):
         self.cur_img=input_np
         self.tensors.clear()
-        self.tensors.append(self.preprocess(input_np))
+        self.tensors=self.preprocess(input_np)
         self.results=self.inference(self.tensors)
         return self.postprocess(self.results)

     # AIBase teardown function
     def deinit(self):
         with ScopedTiming("deinit",self.debug_mode > 0):
             del self.kpu
-            del self.ai2d
+            if hasattr(self,"ai2d"):
+                del self.ai2d
             self.tensors.clear()
             del self.tensors
             gc.collect()
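Two behavior changes land in AIBase here: preprocess now returns a list of input tensors instead of a bare tensor, so run() can hand its result straight to inference() and multi-input models fit the same path, and deinit deletes self.ai2d only when it exists, so subclasses that build other preprocessors (such as the nanotracker classes below, which use ai2d_pad/ai2d_crop) can delegate cleanup to the base class. A minimal sketch of a subclass under the new contract (the class and attribute names here are hypothetical):

# Hypothetical subclass for a kmodel with two inputs; under the new contract
# preprocess returns a list, so run() needs no special casing.
class TwoInputApp(AIBase):
    def preprocess(self, input_np):
        img_tensor = self.ai2d.run(input_np)          # accelerated pad/resize
        state_tensor = nn.from_numpy(self.state_np)   # e.g. recurrent state
        return [img_tensor, state_tensor]             # a list, not a bare tensor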
@@ -57,7 +57,7 @@ def load_image(self,image_path,label):
         self.config_preprocess([img.shape[2],img.shape[1]])
         # Preprocess, run inference, then store the output feature and its label in the database
         tensor=self.preprocess(img)
-        results=self.inference([tensor])
+        results=self.inference(tensor)
         self.embeddings.append(results[0][0])
         self.embeddings_labels.append(label)
         # Reset preprocessing for the video stream
2 changes: 1 addition & 1 deletion fs_resource/tests/ai_demo/face_recognition.py
@@ -350,7 +350,7 @@ def draw_result(self,pl,dets,recg_results):
     det_dim=4
     anchors = np.fromfile(anchors_path, dtype=np.float)
     anchors = anchors.reshape((anchor_len,det_dim))
-    face_recognition_threshold = 0.5  # face recognition threshold
+    face_recognition_threshold = 0.75 # face recognition threshold

     # Initialize the PipeLine; only the resolution handed to the AI and the display resolution matter here
     pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode)
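For intuition on the stricter threshold only: if the recognition score is a normalized similarity between face embeddings (the actual scoring lives elsewhere in this file), raising the cutoff from 0.5 to 0.75 trades more "unknown" verdicts for fewer false matches between different people. A hypothetical sketch:

import ulab.numpy as np

# Hypothetical helper, not from this file: embeddings pointing the same way
# score near 1.0, so a 0.75 cutoff demands a much closer match than 0.5.
def cosine_similarity(a, b):
    return np.sum(a * b) / (np.sqrt(np.sum(a * a)) * np.sqrt(np.sum(b * b)))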
16 changes: 9 additions & 7 deletions fs_resource/tests/ai_demo/face_registration.py
@@ -38,14 +38,16 @@ def __init__(self,kmodel_path,model_input_size,anchors,confidence_threshold=0.25
         self.ai2d=Ai2d(debug_mode)
         # Set the Ai2d input/output formats and data types
         self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8)
+        self.image_size=[]

     # Configure preprocessing; pad and resize are used here. Ai2d supports crop/shift/pad/resize/affine; see /sdcard/app/libs/AI2D.py for details
     def config_preprocess(self,input_image_size=None):
         with ScopedTiming("set preprocess config",self.debug_mode > 0):
             # Initialize the ai2d preprocessing config; it defaults to the sensor-to-AI size and can be overridden via input_image_size
             ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size
+            self.image_size=[input_image_size[1],input_image_size[0]]
             # Compute the padding parameters and configure the pad step
-            self.ai2d.pad(self.get_pad_param(), 0, [104,117,123])
+            self.ai2d.pad(self.get_pad_param(ai2d_input_size), 0, [104,117,123])
             # Configure the resize step
             self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
             # Build the preprocessing pipeline; the arguments are the shapes of the preprocessing input and output tensors
@@ -54,24 +56,24 @@ def config_preprocess(self,input_image_size=None):
     # Custom postprocessing; results is the list of model output arrays. Uses the aidemo face_det_post_process interface
     def postprocess(self,results):
         with ScopedTiming("postprocess",self.debug_mode > 0):
-            res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.rgb888p_size,results)
+            res = aidemo.face_det_post_process(self.confidence_threshold,self.nms_threshold,self.model_input_size[0],self.anchors,self.image_size,results)
             if len(res)==0:
                 return res
             else:
                 return res[0],res[1]

-    def get_pad_param(self):
+    def get_pad_param(self,image_input_size):
         dst_w = self.model_input_size[0]
         dst_h = self.model_input_size[1]
         # Compute the smallest scale ratio for aspect-preserving resize
-        ratio_w = dst_w / self.rgb888p_size[0]
-        ratio_h = dst_h / self.rgb888p_size[1]
+        ratio_w = dst_w / image_input_size[0]
+        ratio_h = dst_h / image_input_size[1]
         if ratio_w < ratio_h:
             ratio = ratio_w
         else:
             ratio = ratio_h
-        new_w = (int)(ratio * self.rgb888p_size[0])
-        new_h = (int)(ratio * self.rgb888p_size[1])
+        new_w = (int)(ratio * image_input_size[0])
+        new_h = (int)(ratio * image_input_size[1])
         dw = (dst_w - new_w) / 2
         dh = (dst_h - new_h) / 2
         top = (int)(round(0))
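For intuition, the same ratio/pad arithmetic as a standalone sketch with example sizes (a 1920x1080 frame into a 320x320 model input; the numbers are illustrative only):

# Standalone sketch of the scaling math in get_pad_param above.
def letterbox_scale(dst_w, dst_h, src_w, src_h):
    ratio = min(dst_w / src_w, dst_h / src_h)  # smallest ratio keeps both dims inside
    new_w, new_h = int(ratio * src_w), int(ratio * src_h)
    return new_w, new_h, dst_w - new_w, dst_h - new_h  # scaled size plus total pad

# letterbox_scale(320, 320, 1920, 1080) -> (320, 180, 0, 140): the frame scales
# to 320x180 and 140 rows of padding fill the rest. Passing image_input_size
# into get_pad_param lets registration images of arbitrary size use this math
# instead of assuming the sensor's rgb888p_size.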
111 changes: 111 additions & 0 deletions fs_resource/tests/ai_demo/keyword_spotting.py
@@ -0,0 +1,111 @@
from libs.PipeLine import ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
from media.pyaudio import *             # audio module
from media.media import *               # media software abstraction module, wraps the media data link and media buffers
import media.wave as wave               # wav audio processing module
import nncase_runtime as nn             # nncase runtime module, wraps kpu (kmodel inference) and ai2d (accelerated image preprocessing)
import ulab.numpy as np                 # numpy-like module, with some interface differences from python numpy
import aidemo                           # aidemo module, wraps pre/post-processing for the ai demos
import time                             # timing statistics
import struct                           # byte/string conversion module
import gc                               # garbage collection module
import os,sys                           # operating system interface modules

# Custom keyword-spotting (wake word) class, inheriting from the AIBase base class
class KWSApp(AIBase):
    def __init__(self, kmodel_path, threshold, debug_mode=0):
        super().__init__(kmodel_path)           # call the base class constructor
        self.kmodel_path = kmodel_path          # kmodel file path
        self.threshold=threshold
        self.debug_mode = debug_mode            # whether debug mode is enabled
        self.cache_np = np.zeros((1, 256, 105), dtype=np.float)
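        # cache_np carries the model's recurrent state between 0.3 s audio
        # chunks: it becomes the second input tensor in preprocess and is
        # refreshed from results[1] in postprocess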

    # Custom preprocessing; returns the list of model input tensors
    def preprocess(self,pcm_data):
        pcm_data_list=[]
        # walk the audio stream data
        for i in range(0, len(pcm_data), 2):
            # each pair of bytes forms one signed integer; convert it to float to get one sample and append it to the current 0.3 s frame
            int_pcm_data = struct.unpack("<h", pcm_data[i:i+2])[0]
            float_pcm_data = float(int_pcm_data)
            pcm_data_list.append(float_pcm_data)
        # turn the pcm data into the model's input feature vector
        mp_feats = aidemo.kws_preprocess(fp, pcm_data_list)[0]
        mp_feats_np = np.array(mp_feats).reshape((1, 30, 40))
        audio_input_tensor = nn.from_numpy(mp_feats_np)
        cache_input_tensor = nn.from_numpy(self.cache_np)
        return [audio_input_tensor,cache_input_tensor]

    # Custom task-specific postprocessing; results is the list of model output arrays
    def postprocess(self, results):
        with ScopedTiming("postprocess", self.debug_mode > 0):
            logits_np = results[0]
            self.cache_np= results[1]
            max_logits = np.max(logits_np, axis=1)[0]
            max_p = np.max(max_logits)
            idx = np.argmax(max_logits)
            # if the score exceeds the threshold and idx==1 (the wake word is present), report a detection so the reply audio plays
            if max_p > self.threshold and idx == 1:
                return 1
            else:
                return 0


if __name__ == "__main__":
    os.exitpoint(os.EXITPOINT_ENABLE)
    nn.shrink_memory_pool()
    # set the model path and other parameters
    kmodel_path = "/sdcard/app/tests/kmodel/kws.kmodel"
    # other parameters
    THRESH = 0.5                  # detection threshold
    SAMPLE_RATE = 16000           # sample rate: 16000 Hz, i.e. 16000 samples per second
    CHANNELS = 1                  # channel count: 1 for mono, 2 for stereo
    FORMAT = paInt16              # audio input/output format: paInt16
    CHUNK = int(0.3 * 16000)      # frames read per pass, 0.3 s worth: 16000*0.3=4800
    reply_wav_file = "/sdcard/app/tests/utils/wozai.wav"  # reply audio played when the kws wake word is detected

    # initialize the audio preprocessing interface
    fp = aidemo.kws_fp_create()
    # initialize the audio streams
    p = PyAudio()
    p.initialize(CHUNK)
    MediaManager.init()           # vb buffer initialization
    # stream for capturing live audio
    input_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,input=True,frames_per_buffer=CHUNK)
    # stream for playing the reply audio
    output_stream = p.open(format=FORMAT,channels=CHANNELS,rate=SAMPLE_RATE,output=True,frames_per_buffer=CHUNK)
    # initialize the custom keyword-spotting instance
    kws = KWSApp(kmodel_path,threshold=THRESH,debug_mode=0)

    try:
        while True:
            os.exitpoint()                    # check for an exit signal
            with ScopedTiming("total",1):
                pcm_data=input_stream.read()
                res=kws.run(pcm_data)
                if res:
                    print("====Detected XiaonanXiaonan!====")
                    wf = wave.open(reply_wav_file, "rb")
                    wav_data = wf.read_frames(CHUNK)
                    while wav_data:
                        output_stream.write(wav_data)
                        wav_data = wf.read_frames(CHUNK)
                    time.sleep(1)             # buffer time so the reply audio finishes playing
                    wf.close()
                else:
                    print("Deactivated!")
            gc.collect()                      # garbage collection
    except Exception as e:
        sys.print_exception(e)                # print the exception details
    finally:
        input_stream.stop_stream()
        output_stream.stop_stream()
        input_stream.close()
        output_stream.close()
        p.terminate()
        MediaManager.deinit()                 # release the vb buffer
        aidemo.kws_fp_destroy(fp)
        kws.deinit()                          # deinitialize
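An implementation note on the byte-decoding loop in preprocess: struct format strings accept a repeat count, so a whole chunk can be decoded in one call, typically much faster than unpacking two bytes at a time. A sketch, assuming the port's struct module supports repeat counts (CPython and mainline MicroPython both do):

import struct

# Hypothetical drop-in for the per-sample loop: decode all int16 samples at once.
def pcm_to_floats(pcm_data):
    count = len(pcm_data) // 2                         # number of int16 samples
    samples = struct.unpack("<%dh" % count, pcm_data)  # little-endian signed 16-bit
    return [float(s) for s in samples]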


24 changes: 6 additions & 18 deletions fs_resource/tests/ai_demo/nanotracker.py
@@ -72,9 +72,9 @@ def config_preprocess(self,input_image_size=None):
     def preprocess(self,input_np):
         if self.need_pad:
             pad_output=self.ai2d_pad.run(input_np).to_numpy()
-            return self.ai2d_crop.run(pad_output)
+            return [self.ai2d_crop.run(pad_output)]
         else:
-            return self.ai2d_crop.run(input_np)
+            return [self.ai2d_crop.run(input_np)]

     # Custom postprocessing; results is the list of model output arrays
     def postprocess(self,results):
@@ -102,15 +102,9 @@ def get_padding_crop_param(self):
     # Override deinit
     def deinit(self):
         with ScopedTiming("deinit",self.debug_mode > 0):
-            del self.kpu
-            del self.ai2d_pad
-            del self.ai2d_crop
-            self.tensors.clear()
-            del self.tensors
-            gc.collect()
-            nn.shrink_memory_pool()
-            os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
-            time.sleep_ms(100)
+            super().deinit()

# Custom real-time tracking task class
class TrackSrcApp(AIBase):
@@ -169,9 +163,9 @@ def preprocess(self,input_np):
with ScopedTiming("preprocess",self.debug_mode>0):
if self.need_pad:
pad_output=self.ai2d_pad.run(input_np).to_numpy()
return self.ai2d_crop.run(pad_output)
return [self.ai2d_crop.run(pad_output)]
else:
return self.ai2d_crop.run(input_np)
return [self.ai2d_crop.run(input_np)]

     # Custom postprocessing; results is the list of model output arrays
     def postprocess(self,results):
@@ -199,15 +193,9 @@ def get_padding_crop_param(self,center_xy_wh):
     # Override deinit
     def deinit(self):
         with ScopedTiming("deinit",self.debug_mode > 0):
-            del self.kpu
-            del self.ai2d_pad
-            del self.ai2d_crop
-            self.tensors.clear()
-            del self.tensors
-            gc.collect()
-            nn.shrink_memory_pool()
-            os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
-            time.sleep_ms(100)
+            super().deinit()


class TrackerApp(AIBase):
6 changes: 4 additions & 2 deletions fs_resource/tests/ai_demo/self_learning.py
@@ -128,9 +128,11 @@ def draw_result(self,pl,feature):

#数据初始化
def data_init(self):
if os.path.exists(self.database_path):
try:
os.rmdir(self.database_path)
os.mkdir(self.database_path)
os.mkdir(self.database_path)
except:
os.mkdir(self.database_path)
self.crop_x_osd = int(self.crop_x / self.rgb888p_size[0] * self.display_size[0])
self.crop_y_osd = int(self.crop_y / self.rgb888p_size[1] * self.display_size[1])
self.crop_w_osd = int(self.crop_w / self.rgb888p_size[0] * self.display_size[0])
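The switch from an existence check to try/except is the idiomatic EAFP fix here: MicroPython's built-in os module typically ships without os.path, so the old os.path.exists() guard could itself fail. A standalone sketch of the pattern:

import os

# Hypothetical helper mirroring data_init above: reset a directory whether or
# not it already exists.
def reset_dir(path):
    try:
        os.rmdir(path)   # raises OSError if path is missing (or not empty)
        os.mkdir(path)   # recreate it empty
    except OSError:
        os.mkdir(path)   # path was absent; just create it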