audioTranscript_cmd.py
(forked from coqui-ai/STT-examples)
import sys
import os
import logging
import argparse
import subprocess
import shlex
import numpy as np
import wavTranscriber
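# wavTranscriber is assumed to be the companion helper module shipped alongside this
# script; it wraps model resolution/loading, webRTC VAD segmentation, and inference.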
# Debug helpers
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
def main(args):
    parser = argparse.ArgumentParser(description='Transcribe long audio files using webRTC VAD or use the streaming interface')
    parser.add_argument('--aggressive', type=int, choices=range(4), required=False,
                        help='Determines how aggressively non-speech is filtered out. (Integer between 0-3)')
    parser.add_argument('--audio', required=False,
                        help='Path to the audio file to run (WAV format)')
    parser.add_argument('--model', required=True,
                        help='Path to directory that contains all model files (output_graph and scorer)')
    parser.add_argument('--stream', required=False, action='store_true',
                        help='Use the stt streaming interface')
    # Parse the argument list passed in by the caller rather than reading sys.argv directly
    args = parser.parse_args(args)

    if args.stream is True:
        print("Opening mic for streaming")
    elif args.audio is not None:
        logging.debug("Transcribing audio file @ %s" % args.audio)
    else:
        parser.print_help()
        parser.exit()
    # Point to a path containing the pre-trained models & resolve ~ if used
    dirName = os.path.expanduser(args.model)

    # Resolve all the paths of model files
    output_graph, scorer = wavTranscriber.resolve_models(dirName)

    # Load output_graph, alphabet and scorer
    model_retval = wavTranscriber.load_model(output_graph, scorer)
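    # model_retval is assumed to be a 3-tuple (model, model_load_time, scorer_load_time),
    # matching how it is indexed below.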
    if args.audio is not None:
        title_names = ['Filename', 'Duration(s)', 'Inference Time(s)', 'Model Load Time(s)', 'Scorer Load Time(s)']
        print("\n%-30s %-20s %-20s %-20s %s" % (title_names[0], title_names[1], title_names[2], title_names[3], title_names[4]))

        inference_time = 0.0

        # Run VAD on the input file
        waveFile = args.audio
        segments, sample_rate, audio_length = wavTranscriber.vad_segment_generator(waveFile, args.aggressive)
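        # Each yielded segment is assumed to be a raw buffer of 16-bit PCM bytes
        # covering one voiced region detected by the VAD.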
        # os.path.splitext strips the extension safely (str.rstrip would strip characters, not the suffix)
        transcriptFile = os.path.splitext(waveFile)[0] + ".txt"
        f = open(transcriptFile, 'w')
        logging.debug("Saving Transcript @: %s" % transcriptFile)

        for i, segment in enumerate(segments):
            # Run stt on the chunk that just completed VAD
            logging.debug("Processing chunk %002d" % (i,))
            audio = np.frombuffer(segment, dtype=np.int16)
            output = wavTranscriber.stt(model_retval[0], audio, sample_rate)
            inference_time += output[1]
            logging.debug("Transcript: %s" % output[0])

            f.write(output[0] + " ")

        # Summary of the files processed
        f.close()

        # Extract the filename from the full file path
        filename, ext = os.path.splitext(os.path.basename(waveFile))
        logging.debug("************************************************************************************************************")
        logging.debug("%-30s %-20s %-20s %-20s %s" % (title_names[0], title_names[1], title_names[2], title_names[3], title_names[4]))
        logging.debug("%-30s %-20.3f %-20.3f %-20.3f %-0.3f" % (filename + ext, audio_length, inference_time, model_retval[1], model_retval[2]))
        logging.debug("************************************************************************************************************")
        print("%-30s %-20.3f %-20.3f %-20.3f %-0.3f" % (filename + ext, audio_length, inference_time, model_retval[1], model_retval[2]))
    else:
        sctx = model_retval[0].createStream()
        # 'rec' comes from SoX: quiet output (-q -V0), signed 16-bit (-e signed -b 16),
        # little-endian (-L), mono (-c 1), 16 kHz (-r 16k), raw PCM to stdout, gain lowered by 2 dB
        subproc = subprocess.Popen(shlex.split('rec -q -V0 -e signed -L -c 1 -b 16 -r 16k -t raw - gain -2'),
                                   stdout=subprocess.PIPE,
                                   bufsize=0)
        print('You can start speaking now. Press Control-C to stop recording.')

        try:
            while True:
                data = subproc.stdout.read(512)
                sctx.feedAudioContent(np.frombuffer(data, np.int16))
        except KeyboardInterrupt:
            print('Transcription: ', sctx.finishStream())
            subproc.terminate()
            subproc.wait()
if __name__ == '__main__':
    main(sys.argv[1:])
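
Example invocations (a sketch; the model directory and WAV path are placeholders, and the directory must hold the output_graph and scorer files that wavTranscriber.resolve_models expects):

    # Transcribe a WAV file, with VAD aggressiveness 1
    python audioTranscript_cmd.py --aggressive 1 --audio ./audio/sample.wav --model ./models

    # Stream from the microphone instead (requires SoX's `rec` on the PATH)
    python audioTranscript_cmd.py --stream --model ./models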