atalink committed on
Commit b8a79bd · 1 Parent(s): be0ba66
Files changed (3):
  1. app.py +24 -1
  2. app_tts.py +107 -0
  3. requirements.txt +23 -2
app.py CHANGED
@@ -1,6 +1,7 @@
 import os, sys
 import tempfile
 import gradio as gr
+from app_tts import infer_tts
 from src.gradio_demo import SadTalker
 # from src.utils.text2speech import TTSTalker
 from huggingface_hub import snapshot_download
@@ -39,6 +40,28 @@ def sadtalker_demo():
     # tts_talker = TTSTalker()
 
     with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
+        gr.Markdown("""
+        # 🎤 F5-TTS: Vietnamese Text-to-Speech Synthesis.
+        # The model was trained with approximately 1000 hours of data on an RTX 3090 GPU.
+        Enter text and upload a sample voice to generate natural speech.
+        """)
+
+        with gr.Row():
+            ref_audio = gr.Audio(label="🔊 Sample Voice", type="filepath")
+            gen_text = gr.Textbox(label="📝 Text", placeholder="Enter the text to generate voice...", lines=3)
+
+        speed = gr.Slider(0.3, 2.0, value=1.0, step=0.1, label="⚡ Speed")
+        btn_synthesize = gr.Button("🔥 Generate Voice")
+
+        with gr.Row():
+            driven_audio = gr.Audio(label="🎧 Generated Audio", type="numpy")
+            output_spectrogram = gr.Image(label="📊 Spectrogram")
+
+        btn_synthesize.click(infer_tts, inputs=[ref_audio, gen_text, speed], outputs=[driven_audio, output_spectrogram])
+
         gr.Markdown("<div align='center'> <h2> 😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023) </span> </h2> \
         <a style='font-size:18px;color: #efefef' href='https://arxiv.org/abs/2211.12194'>Arxiv</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
         <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
@@ -63,7 +86,7 @@ def sadtalker_demo():
     gr.Markdown("Possible driving combinations: <br> 1. Audio only 2. Audio/IDLE Mode + Ref Video(pose, blink, pose+blink) 3. IDLE Mode only 4. Ref Video only (all) ")
 
     with gr.Row():
-        driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath", max_length=180) # 180s
+        # driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath", max_length=180) # 180s
         driven_audio_no = gr.Audio(label="Use IDLE mode, no audio is required", source="upload", type="filepath", visible=False)
 
         with gr.Column():
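For context on the hunk above: infer_tts returns a (sample_rate, numpy_array) tuple, which gr.Audio(type="numpy") accepts directly, so the synthesized clip can be previewed and then reused as the driving audio for SadTalker. A minimal self-contained sketch of that wiring, where fake_tts is a hypothetical stand-in for infer_tts:

import numpy as np
import gradio as gr

def fake_tts(text: str, speed: float = 1.0):
    # Hypothetical stand-in: one second of silence instead of a real synthesis call.
    sr = 24000
    return sr, np.zeros(int(sr / speed), dtype=np.float32)

with gr.Blocks() as demo:
    text = gr.Textbox(label="Text")
    speed = gr.Slider(0.3, 2.0, value=1.0, step=0.1, label="Speed")
    audio = gr.Audio(label="Generated Audio", type="numpy")
    gr.Button("Generate").click(fake_tts, inputs=[text, speed], outputs=audio)

demo.launch()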
app_tts.py ADDED
@@ -0,0 +1,107 @@
+import spaces
+import os
+from huggingface_hub import login
+import gradio as gr
+from cached_path import cached_path
+import tempfile
+from vinorm import TTSnorm
+
+from f5_tts.model import DiT
+from f5_tts.infer.utils_infer import (
+    preprocess_ref_audio_text,
+    load_vocoder,
+    load_model,
+    infer_process,
+    save_spectrogram,
+)
+
+# Retrieve the token from the Space secrets
+hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+
+# Log in to Hugging Face
+if hf_token:
+    login(token=hf_token)
+
+def post_process(text):
+    # Collapse punctuation doubled during normalization and strip stray quotes.
+    text = " " + text + " "
+    text = text.replace(" . . ", " . ")
+    text = " " + text + " "
+    text = text.replace(" .. ", " . ")
+    text = " " + text + " "
+    text = text.replace(" , , ", " , ")
+    text = " " + text + " "
+    text = text.replace(" ,, ", " , ")
+    text = " " + text + " "
+    text = text.replace('"', "")
+    return " ".join(text.split())
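+# For example (illustrative input), doubled punctuation collapses and quotes drop:
+#     post_process('Xin chao . . "ban" ,,')  ->  'Xin chao . ban ,'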
+
+# Load models
+vocoder = load_vocoder()
+model = load_model(
+    DiT,
+    dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4),
+    ckpt_path=str(cached_path("hf://hynt/F5-TTS-Vietnamese-ViVoice/model_last.pt")),
+    vocab_file=str(cached_path("hf://hynt/F5-TTS-Vietnamese-ViVoice/config.json")),
+)
+
+@spaces.GPU
+def infer_tts(ref_audio_orig: str, gen_text: str, speed: float = 1.0, request: gr.Request = None):
+    if not ref_audio_orig:
+        raise gr.Error("Please upload a sample audio file.")
+    if not gen_text.strip():
+        raise gr.Error("Please enter the text content to generate voice.")
+    if len(gen_text.split()) > 1000:
+        raise gr.Error("Please enter text content with fewer than 1000 words.")
+
+    try:
+        ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_orig, "")
+        final_wave, final_sample_rate, spectrogram = infer_process(
+            ref_audio, ref_text.lower(), post_process(TTSnorm(gen_text)).lower(), model, vocoder, speed=speed
+        )
+        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_spectrogram:
+            spectrogram_path = tmp_spectrogram.name
+            save_spectrogram(spectrogram, spectrogram_path)
+
+        return (final_sample_rate, final_wave), spectrogram_path
+    except Exception as e:
+        raise gr.Error(f"Error generating voice: {e}")
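+# Note: the first return value is a (sample_rate, waveform) tuple, the format
+# gr.Audio(type="numpy") expects; the second is the path to the spectrogram PNG.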
+
+# Gradio UI
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("""
+    # 🎤 F5-TTS: Vietnamese Text-to-Speech Synthesis.
+    # The model was trained with approximately 1000 hours of data on an RTX 3090 GPU.
+    Enter text and upload a sample voice to generate natural speech.
+    """)
+
+    with gr.Row():
+        ref_audio = gr.Audio(label="🔊 Sample Voice", type="filepath")
+        gen_text = gr.Textbox(label="📝 Text", placeholder="Enter the text to generate voice...", lines=3)
+
+    speed = gr.Slider(0.3, 2.0, value=1.0, step=0.1, label="⚡ Speed")
+    btn_synthesize = gr.Button("🔥 Generate Voice")
+
+    with gr.Row():
+        output_audio = gr.Audio(label="🎧 Generated Audio", type="numpy")
+        output_spectrogram = gr.Image(label="📊 Spectrogram")
+
+    model_limitations = gr.Textbox(
+        value="""1. This model may not perform well with numbers, dates, special characters, etc. => a text normalization module is needed.
+2. The rhythm of some generated clips may be inconsistent or choppy => choose clearly pronounced sample audio with minimal pauses for better synthesis quality.
+3. By default, the reference audio is transcribed with the pho-whisper-medium model, which may not always recognize Vietnamese accurately, giving poor synthesis quality.
+4. Inference on overly long paragraphs may produce poor results.""",
+        label="❗ Model Limitations",
+        lines=4,
+        interactive=False
+    )
+
+    btn_synthesize.click(infer_tts, inputs=[ref_audio, gen_text, speed], outputs=[output_audio, output_spectrogram])
+
+# Launch with share=True to get a public gradio.live link:
+# demo.queue().launch(share=True)
+
+if __name__ == "__main__":
+    demo.queue().launch()
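A quick way to exercise the new module outside the UI, as a sketch: "ref_voice.wav" is an illustrative path, and importing app_tts triggers the model loading shown above.

import soundfile as sf
from app_tts import infer_tts

# infer_tts returns ((sample_rate, waveform), spectrogram_png_path)
(sr, wave), spec_png = infer_tts("ref_voice.wav", "Xin chào, đây là giọng nói thử nghiệm.", speed=1.0)
sf.write("generated.wav", wave, sr)
print("Spectrogram saved to", spec_png)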
requirements.txt CHANGED
@@ -17,8 +17,29 @@ pyyaml
 joblib==1.1.0
 scikit-image==0.19.3
 basicsr==1.4.2
-facexlib==0.3.0
+facexlib
 dlib-bin
 gfpgan
 av
-safetensors
+safetensors
+gradio
+soundfile
+transformers
+bitsandbytes>0.37.0
+vinorm
+cached_path
+huggingface_hub
+accelerate>=0.33.0
+click
+datasets
+ema_pytorch>=0.5.2
+hydra-core>=1.3.0
+jieba
+matplotlib
+pypinyin
+tomli
+torchdiffeq
+transformers_stream_generator
+vocos
+wandb
+x_transformers>=1.31.14
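With these entries in place, the Space should be reproducible locally (assuming a CUDA-capable machine; the F5-TTS and SadTalker checkpoints are fetched on first run):

pip install -r requirements.txt
python app.py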