forked from yl4579/StyleTTS2
-
Notifications
You must be signed in to change notification settings - Fork 0
/
app_additional_voices.py
135 lines (122 loc) · 7.19 KB
/
app_additional_voices.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
# Gradio demo of StyleTTS 2 by @fakerybakery
# modified with additional_voices functionality, and some functionality removed
import gradio as gr
import logging
import msinference
#import ljinference
import torch
import os
from tortoise.utils.text import split_and_recombine_text
import numpy as np
import pickle
logging.basicConfig(level=logging.INFO)
# Shared Gradio theme for the whole app.
theme = gr.themes.Base(
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
)
# Built-in voice names; each is expected to map to voices/<name>.wav.
voicelist = ['f-us-1', 'f-us-2', 'f-us-3', 'f-us-4', 'm-us-1', 'm-us-2', 'm-us-3', 'm-us-4']
voices = {}  # voice name -> precomputed style embedding (msinference.compute_style)
import phonemizer
# eSpeak-backed phonemizer for US English; module-level so it is built once.
global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)
# todo: cache computed style, load using pickle
# if os.path.exists('voices.pkl'):
#     with open('voices.pkl', 'rb') as f:
#         voices = pickle.load(f)
# else:
# Precompute style embeddings for every built-in voice at startup.
for v in voicelist:
    voices[v] = msinference.compute_style(f'voices/{v}.wav')
# Directory scanned for user-supplied voices; overridable via the
# ADDITIONAL_VOICE_DIR environment variable.
ADDITIONAL_VOICE_DIR = os.environ.get('ADDITIONAL_VOICE_DIR', 'additional_voices')
def scan_additional_voice_dir():
    """Scan ADDITIONAL_VOICE_DIR for .wav files and register any new voices.

    Each .wav file whose basename is not already present in the module-level
    ``voices`` dict gets its name appended to ``voicelist`` and its style
    embedding computed via ``msinference.compute_style``. Safe to call
    repeatedly: previously registered voices are counted but not recomputed.

    Returns: None (mutates ``voices`` and ``voicelist`` in place).
    """
    if not ADDITIONAL_VOICE_DIR:
        logging.info("No additional voice directory set.")
        return
    logging.info(f"Scanning for additional voices from: {ADDITIONAL_VOICE_DIR}")
    # A missing/non-directory path is not fatal; report it clearly instead of
    # letting os.listdir raise an OSError that the broad except would blur.
    if not os.path.isdir(ADDITIONAL_VOICE_DIR):
        logging.warning(f"Additional voice directory not found: {ADDITIONAL_VOICE_DIR}")
        return
    new_voices_count = 0
    existing_voices_count = 0
    try:
        for fname in sorted(os.listdir(ADDITIONAL_VOICE_DIR)):
            base, ext = os.path.splitext(fname)
            # Match the extension case-insensitively (e.g. 'MyVoice.WAV'),
            # but keep the real filename for the path so the file is found.
            if ext.lower() != '.wav':
                continue
            if base in voices:
                existing_voices_count += 1
                continue
            voicelist.append(base)
            voices[base] = msinference.compute_style(os.path.join(ADDITIONAL_VOICE_DIR, fname))
            logging.info(f" Voice {base} computed")
            new_voices_count += 1
        logging.info(f"Added: {new_voices_count}, Previously Computed: {existing_voices_count}")
    except Exception as e:
        # compute_style can fail on malformed audio; keep the app alive and log.
        logging.error(f"An error occurred while loading additional voices: {e}")
# Call the function initially to load the voices
scan_additional_voice_dir()
def synthesize(text, voice, lngsteps, password=None, progress=gr.Progress()):
    """Synthesize `text` with one of the preloaded voices.

    Args:
        text: Input text; split into sentence chunks before inference.
        voice: Voice name, a key of the module-level ``voices`` dict.
        lngsteps: Diffusion steps (validated to the 5-25 range).
        password: Unused legacy parameter. It MUST have a default: the UI
            wires only [inp, voice, multispeakersteps] into this function
            (see btn.click below), so without a default every click would
            raise TypeError.
        progress: Gradio progress tracker, injected automatically.

    Returns:
        (24000, np.ndarray): sample rate and concatenated audio.

    Raises:
        gr.Error: on empty text or out-of-range step counts.
    """
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if lngsteps > 25:
        raise gr.Error("Max 25 steps")
    if lngsteps < 5:
        raise gr.Error("Min 5 steps")
    texts = split_and_recombine_text(text)
    # Additional voices keep their original file-name casing, so try the
    # exact name first and only fall back to lowercasing, which is how the
    # built-in voices ('f-us-1', ...) are keyed.
    v = voice if voice in voices else voice.lower()
    audios = []
    for t in progress.tqdm(texts):
        audios.append(msinference.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
    return (24000, np.concatenate(audios))
def clsynthesize(text, voice, vcsteps):
    """Voice cloning: derive a style from the uploaded clip and speak `text`.

    Args:
        text: Input text (max 400 characters).
        voice: Filepath of the reference audio clip.
        vcsteps: Number of diffusion steps.

    Returns:
        (24000, audio): sample rate and the synthesized waveform.

    Raises:
        gr.Error: on empty or overly long text.
    """
    if not text.strip():
        raise gr.Error("You must enter some text")
    # if global_phonemizer.phonemize([text]) > 300:
    if len(text) > 400:
        raise gr.Error("Text must be under 400 characters")
    style = msinference.compute_style(voice)
    audio = msinference.inference(text, style, alpha=0.3, beta=0.7, diffusion_steps=vcsteps, embedding_scale=1)
    return (24000, audio)
#def ljsynthesize(text):
# if text.strip() == "":
# raise gr.Error("You must enter some text")
# # if global_phonemizer.phonemize([text]) > 300:
# if len(text) > 400:
# raise gr.Error("Text must be under 400 characters")
# noise = torch.randn(1,1,256).to('cuda' if torch.cuda.is_available() else 'cpu')
# return (24000, ljinference.inference(text, noise, diffusion_steps=7, embedding_scale=1))
# Multi-voice tab: pick a preloaded voice and synthesize.
with gr.Blocks() as vctk: # just realized it isn't vctk but libritts but i'm too lazy to change it rn
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            # NOTE(review): the Dropdown captures voicelist at build time; voices
            # added by a later scan_additional_voice_dir() call won't appear here.
            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-2', interactive=True)
            multispeakersteps = gr.Slider(minimum=5, maximum=15, value=7, step=1, label="Diffusion Steps", info="Higher = better quality, but slower", interactive=True)
            # use_gruut = gr.Checkbox(label="Use alternate phonemizer (Gruut) - Experimental")
        with gr.Column(scale=1):
            btn = gr.Button("Synthesize", variant="primary")
            audio = gr.Audio(interactive=False, label="Synthesized Audio")
    btn.click(synthesize, inputs=[inp, voice, multispeakersteps], outputs=[audio], concurrency_limit=4)
# Voice-cloning tab: upload a reference clip (max 300 s) and synthesize with it.
with gr.Blocks() as clone:
    with gr.Row():
        with gr.Column(scale=1):
            clinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            clvoice = gr.Audio(label="Voice", interactive=True, type='filepath', max_length=300)
            vcsteps = gr.Slider(minimum=5, maximum=20, value=20, step=1, label="Diffusion Steps", info="Higher = better quality, but slower", interactive=True)
        with gr.Column(scale=1):
            clbtn = gr.Button("Synthesize", variant="primary")
            claudio = gr.Audio(interactive=False, label="Synthesized Audio")
    clbtn.click(clsynthesize, inputs=[clinp, clvoice, vcsteps], outputs=[claudio], concurrency_limit=4)
#with gr.Blocks() as lj:
# with gr.Row():
# with gr.Column(scale=1):
# ljinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
# with gr.Column(scale=1):
# ljbtn = gr.Button("Synthesize", variant="primary")
# ljaudio = gr.Audio(interactive=False, label="Synthesized Audio")
# ljbtn.click(ljsynthesize, inputs=[ljinp], outputs=[ljaudio], concurrency_limit=4)
# Top-level app: header markdown plus the two tabs defined above.
with gr.Blocks(title="StyleTTS 2", css="footer{display:none !important}", theme=theme) as demo:
    gr.Markdown("""# StyleTTS 2
[Paper](https://arxiv.org/abs/2306.07691) - [Samples](https://styletts2.github.io/) - [Code](https://github.com/yl4579/StyleTTS2)
GUI of StyleTTS 2 by [mrfakename](https://twitter.com/realmrfakename).
#### Help the StyleTTS 2 space get to the top of HF Trending! [Give it a Like!](https://huggingface.co/spaces/styletts2/styletts2)
**Before using this demo, you agree to inform the listeners that the speech samples are synthesized by the pre-trained models, unless you have the permission to use the voice you synthesize. That is, you agree to only use voices whose speakers grant the permission to have their voice cloned, either directly or by license before making synthesized voices public, or you have to publicly announce that these voices are synthesized if you do not have the permission to use these voices.**
**NOTE: StyleTTS 2 does better on longer texts.** For example, making it say "hi" will produce a lower-quality result than making it say a longer phrase.""")
    gr.TabbedInterface([vctk, clone], ['Multi-Voice', 'Voice Cloning'])
    gr.Markdown("""
Demo by [mrfakename](https://twitter.com/realmrfakename). I am not affiliated with the StyleTTS 2 authors.
This is the local version of the demo
""")
# Script entry point: queue caps pending jobs at 15, API access is disabled,
# and the server binds on all interfaces at port 8083.
if __name__ == "__main__":
    demo.queue(api_open=False, max_size=15).launch(show_api=False, server_port=8083, server_name="0.0.0.0")