biosn2 committed on
Commit
f968496
·
verified ·
1 Parent(s): b9e0911

Upload app1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app1.py +228 -0
app1.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os
import sys
import threading
import time
import subprocess

from huggingface_hub import snapshot_download
import warnings
# Silence noisy deprecation chatter from the ML dependencies.
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=UserWarning)

import argparse
# ----------------- Command-line argument parsing -----------------
parser = argparse.ArgumentParser(description="IndexTTS WebUI")
parser.add_argument("--verbose", action="store_true", default=False, help="Enable verbose mode")  # print detailed inference logs
parser.add_argument("--port", type=int, default=7860, help="Port to run the web UI on")  # WebUI port
parser.add_argument("--host", type=str, default="127.0.0.1", help="Host to run the web UI on")  # NOTE(review): parsed but main() binds to 0.0.0.0 regardless — confirm intent
parser.add_argument("--model_dir", type=str, default="checkpoints", help="Model checkpoints directory")  # model checkpoint directory
cmd_args = parser.parse_args()
21
+
22
# ----------------- Module search path -----------------
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
sys.path.append(os.path.join(current_dir, "indextts"))

# ----------------- Download the model -----------------
MODE = 'local'
# Fetch the model snapshot from Hugging Face into the local checkpoints dir.
snapshot_download("IndexTeam/IndexTTS-1.5", local_dir="checkpoints")

# ----------------- Verify model file integrity -----------------
# Exit early (first missing file wins) so the heavyweight imports below
# never run against an incomplete checkpoint.
if not os.path.exists(cmd_args.model_dir):
    print(f"Model directory {cmd_args.model_dir} does not exist. Please download the model first.")
    sys.exit(1)

required_files = (
    "bigvgan_generator.pth",
    "bpe.model",
    "gpt.pth",
    "config.yaml",
)
for required in required_files:
    file_path = os.path.join(cmd_args.model_dir, required)
    if not os.path.exists(file_path):
        print(f"Required file {file_path} does not exist. Please download it.")
        sys.exit(1)
46
+
47
# ----------------- Import Gradio and the remaining modules -----------------
# Deferred until after the checkpoint check so a missing model fails fast.
import gradio as gr
import pandas as pd

from indextts.infer import IndexTTS  # core TTS inference class
from tools.i18n.i18n import I18nAuto  # internationalization helper

# ----------------- Initialize the TTS model -----------------
i18n = I18nAuto(language="en")  # UI language is English (the original comment claimed Chinese; the code passes "en")
tts = IndexTTS(model_dir=cmd_args.model_dir, cfg_path=os.path.join(cmd_args.model_dir, "config.yaml"))  # load model weights/config

# ----------------- Create output directories -----------------
os.makedirs("outputs/tasks", exist_ok=True)  # exist_ok: safe on reruns
os.makedirs("prompts", exist_ok=True)  # holds uploaded reference audio
61
+
62
+ # ----------------- 核心函数 -----------------
63
+
64
def ensure_wav(file_path):
    """Return a path to a WAV version of *file_path*.

    If the input already has a ``.wav`` extension (case-insensitive) it is
    returned unchanged; otherwise ffmpeg converts it to WAV alongside the
    original file and the new path is returned.

    Args:
        file_path: path to the input audio file.

    Returns:
        Path to a WAV file.

    Raises:
        subprocess.CalledProcessError: if the ffmpeg conversion fails.
    """
    # os.path.splitext (unlike the old rsplit(".", 1)) only inspects the
    # final path component, so a dotted directory name such as
    # "runs.v2/clip" can no longer produce a corrupted target path.
    root, ext = os.path.splitext(file_path)
    if ext.lower() == ".wav":
        return file_path
    wav_path = root + ".wav"
    subprocess.run(["ffmpeg", "-y", "-i", file_path, wav_path], check=True)
    return wav_path
75
+
76
def progress_print(step, total, info=""):
    """Render generation progress on a single terminal line.

    Args:
        step: current step number.
        total: total number of steps.
        info: extra text appended after the percentage.
    """
    # \r rewinds to column 0 so each update overwrites the previous one.
    fraction_done = step / total
    print(f"\r[{int(fraction_done * 100)}%] {info}", end="", flush=True)
85
+
86
def gen_single(prompt, text, max_text_tokens_per_sentence=120, *args, progress=gr.Progress()):
    """Synthesize speech for *text* conditioned on the reference audio *prompt*.

    Args:
        prompt: path to the reference audio (any format; converted to WAV).
        text: target text to synthesize.
        max_text_tokens_per_sentence: token budget used when splitting the
            text into sentences.
        *args: advanced sampling parameters, in this exact order:
            do_sample, top_p, top_k, temperature, length_penalty,
            num_beams, repetition_penalty, max_mel_tokens.
        progress: Gradio progress object forwarded to the TTS engine.

    Returns:
        A ``gr.update`` pointing the output-audio widget at the new file.
    """
    prompt = ensure_wav(prompt)  # the engine expects WAV input
    output_path = os.path.join("outputs", f"spk_{int(time.time())}.wav")

    # Hand both progress callbacks to the TTS engine.
    tts.gr_progress = progress
    tts.print_progress = progress_print

    # Unpack the advanced parameters (order must match advanced_params in the UI).
    (do_sample, top_p, top_k, temperature,
     length_penalty, num_beams, repetition_penalty, max_mel_tokens) = args

    generation_kwargs = {
        "do_sample": bool(do_sample),                     # stochastic sampling vs. greedy decoding
        "top_p": float(top_p),                            # nucleus sampling threshold (UI range 0.0-1.0)
        "top_k": int(top_k) if int(top_k) > 0 else None,  # 0 disables top-k filtering
        "temperature": float(temperature),                # sampling temperature (UI range 0.1-2.0)
        "length_penalty": float(length_penalty),          # >0 favors longer output, <0 shorter
        "num_beams": num_beams,                           # beam-search width (UI range 1-10)
        "repetition_penalty": float(repetition_penalty),  # >1 discourages repeated phonemes/words
        "max_mel_tokens": int(max_mel_tokens),            # cap on generated mel frames
    }

    # Run TTS inference.
    output = tts.infer(
        prompt, text, output_path,
        verbose=cmd_args.verbose,
        max_text_tokens_per_sentence=int(max_text_tokens_per_sentence),
        **generation_kwargs,
    )

    print("\n生成完成:", output_path)
    return gr.update(value=output, visible=True)
137
+
138
def update_prompt_audio():
    """Enable the generate button once a reference audio has been uploaded."""
    return gr.update(interactive=True)
144
+
145
# ----------------- Gradio WebUI construction -----------------
with gr.Blocks(title="IndexTTS Demo") as demo:
    mutex = threading.Lock()  # NOTE(review): created but never acquired in this file
    gr.HTML('''
    标题
    ''')
    with gr.Tab("音频生成"):
        with gr.Row():
            os.makedirs("prompts", exist_ok=True)  # upload dir (also created at startup)
            prompt_audio = gr.Audio(label="参考音频", key="prompt_audio",
                                    sources=["upload","microphone"], type="filepath")
            with gr.Column():
                input_text_single = gr.TextArea(label="文本", key="input_text_single", placeholder="请输入目标文本", info="当前模型版本{}".format(tts.model_version or "1.0"))
                gen_button = gr.Button("生成语音", key="gen_button", interactive=True)
            output_audio = gr.Audio(label="生成结果", visible=True, key="output_audio")
        # Advanced generation parameters
        with gr.Accordion("高级生成参数设置", open=False):
            # GPT2 sampling parameters
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("**GPT2 采样设置** _参数会影响音频多样性和生成速度_")
                    with gr.Row():
                        do_sample = gr.Checkbox(label="do_sample", value=True)
                        temperature = gr.Slider(label="temperature", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
                    with gr.Row():
                        top_p = gr.Slider(label="top_p", minimum=0.0, maximum=1.0, value=0.8, step=0.01)
                        top_k = gr.Slider(label="top_k", minimum=0, maximum=100, value=30, step=1)
                        num_beams = gr.Slider(label="num_beams", value=3, minimum=1, maximum=10, step=1)
                    with gr.Row():
                        repetition_penalty = gr.Number(label="repetition_penalty", precision=None, value=10.0, minimum=0.1, maximum=20.0, step=0.1)
                        length_penalty = gr.Number(label="length_penalty", precision=None, value=0.0, minimum=-2.0, maximum=2.0, step=0.1)
                        max_mel_tokens = gr.Slider(label="max_mel_tokens", value=600, minimum=50, maximum=tts.cfg.gpt.max_mel_tokens, step=10)
                # Sentence-splitting settings
                with gr.Column(scale=2):
                    gr.Markdown("**分句设置**")
                    with gr.Row():
                        max_text_tokens_per_sentence = gr.Slider(label="分句最大Token数", value=120, minimum=20, maximum=tts.cfg.gpt.max_text_tokens, step=2)
                    with gr.Accordion("预览分句结果", open=True) as sentences_settings:
                        sentences_preview = gr.Dataframe(headers=["序号", "分句内容", "Token数"], key="sentences_preview", wrap=True)

        # Order must match the unpacking of *args inside gen_single.
        advanced_params = [
            do_sample, top_p, top_k, temperature,
            length_penalty, num_beams, repetition_penalty, max_mel_tokens,
        ]

        # Sentence-split preview: re-tokenize whenever the text or the
        # per-sentence token budget changes; empty text clears the table.
        input_text_single.change(
            lambda text, max_tokens_per_sentence: {
                sentences_preview: gr.update(value=[
                    [i, ''.join(s), len(s)] for i, s in enumerate(
                        tts.tokenizer.split_sentences(tts.tokenizer.tokenize(text), int(max_tokens_per_sentence))
                    )
                ]) if text else gr.update(value=pd.DataFrame([], columns=["序号","分句内容","Token数"]))
            },
            inputs=[input_text_single, max_text_tokens_per_sentence],
            outputs=[sentences_preview]
        )
        max_text_tokens_per_sentence.change(
            lambda text, max_tokens_per_sentence: {
                sentences_preview: gr.update(value=[
                    [i, ''.join(s), len(s)] for i, s in enumerate(
                        tts.tokenizer.split_sentences(tts.tokenizer.tokenize(text), int(max_tokens_per_sentence))
                    )
                ]) if text else gr.update(value=pd.DataFrame([], columns=["序号","分句内容","Token数"]))
            },
            inputs=[input_text_single, max_text_tokens_per_sentence],
            outputs=[sentences_preview]
        )
        # Uploading a reference audio enables the generate button.
        prompt_audio.upload(update_prompt_audio, inputs=[], outputs=[gen_button])

    # Clicking the generate button invokes gen_single.
    gen_button.click(gen_single,
                     inputs=[prompt_audio, input_text_single, max_text_tokens_per_sentence, *advanced_params],
                     outputs=[output_audio])
219
+
220
# ----------------- Entry point -----------------
def main():
    """Launch the Gradio web UI.

    Binds to 0.0.0.0 (all interfaces) on the configured port.
    NOTE(review): cmd_args.host is parsed but never used — the bind address
    is hard-coded here (hosted deployments typically require 0.0.0.0);
    confirm whether --host should be honored.
    """
    demo.launch(server_name="0.0.0.0", server_port=cmd_args.port)

if __name__ == "__main__":
    main()