The error output is below.
DEBUG:matplotlib:matplotlib data path: C:\ProgramData\anaconda3\envs\tts\lib\site-packages\matplotlib\mpl-data
DEBUG:matplotlib:CONFIGDIR=C:\Users\Administrator\.matplotlib
DEBUG:matplotlib:interactive is False
DEBUG:matplotlib:platform is win32
DEBUG:matplotlib:CACHEDIR=C:\Users\Administrator\.matplotlib
DEBUG:matplotlib.font_manager:Using fontManager instance from C:\Users\Administrator\.matplotlib\fontlist-v330.json
DEBUG:urllib3.connectionpool:Starting new HTTPS connection (1): api.gradio.app:443
DEBUG:urllib3.connectionpool:https://api.gradio.app:443 "GET /gradio-messaging/en HTTP/1.1" 200 3
Below is the code that ran.
import argparse
import torch
from torch import no_grad, LongTensor
import commons
import utils
import gradio as gr
from models import SynthesizerTrn
from text import text_to_sequence, _clean_text


def get_text(text, hps):
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm


def create_tts_fn(model, hps, speaker_ids):
    def tts_fn(text, speaker, speed, noise_scale, noise_scale_w):
        speaker_id = speaker_ids[speaker]
        stn_tst = get_text(text, hps)
        with no_grad():
            x_tst = stn_tst.cuda().unsqueeze(0)
            x_tst_lengths = LongTensor([stn_tst.size(0)]).cuda()
            sid = LongTensor([speaker_id]).cuda()
            audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
                                noise_scale_w=noise_scale_w, length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
        del stn_tst, x_tst, x_tst_lengths, sid
        return "Success", (hps.data.sampling_rate, audio)
    return tts_fn


def create_to_phoneme_fn(hps):
    def to_phoneme_fn(text):
        return _clean_text(text, hps.data.text_cleaners) if text != "" else ""
    return to_phoneme_fn


css = """
#advanced-btn {
color: white;
border-color: black;
background: black;
font-size: .7rem !important;
line-height: 19px;
margin-top: 24px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
display: none;
margin-bottom: 20px;
}
"""


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_path", required=True,
                        help="path to config file")
    parser.add_argument("--model_path", required=True,
                        help="path to model file")
    args = parser.parse_args()

    models_tts = []
    name = 'VITS-TTS demo v1.01'
    example = '학습은 잘 마치셨나요? 좋은 결과가 있길 바래요.'
    config_path = args.config_path
    model_path = args.model_path

    hps = utils.get_hparams_from_file(config_path)
    model = SynthesizerTrn(
        len(hps.symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model).cuda()
    utils.load_checkpoint(model_path, model, None)
    model.eval()

    speaker_ids = [sid for sid, name in enumerate(
        hps.speakers) if name != "None"]
    speakers = [name for sid, name in enumerate(
        hps.speakers) if name != "None"]

    models_tts.append((name, speakers, example,
                       hps.symbols, create_tts_fn(model, hps, speaker_ids),
                       create_to_phoneme_fn(hps)))

    app = gr.Blocks(css=css)
    with app:
        gr.Markdown("Gradio VITS-TTS Inference demo v1.01\n\n")
        with gr.Tabs():
            for i, (name, speakers, example, symbols, tts_fn,
                    to_phoneme_fn) in enumerate(models_tts):
                with gr.TabItem(f"VITS-TTS_v1.01"):
                    with gr.Column():
                        gr.Markdown(f"## {name}\n\n")
                        tts_input1 = gr.TextArea(label="Text", value=example,
                                                 elem_id=f"tts-input{i}")
                        tts_input2 = gr.Dropdown(label="Speaker", choices=speakers,
                                                 type="index", value=speakers[0])
                        tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2,
                                               step=0.05)
                        noise_scale_slider = gr.Slider(label="Noise-scale (defaults = 0.667)",
                                                       value=0.667, minimum=0, maximum=1, step=0.01)
                        noise_scale_w_slider = gr.Slider(label="Noise-width (defaults = 0.8)",
                                                         value=0.8, minimum=0, maximum=2, step=0.05)
                        tts_submit = gr.Button("Generate", variant="primary")
                        tts_output1 = gr.Textbox(label="Output Message")
                        tts_output2 = gr.Audio(label="Output Audio")
                        tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3,
                                                  noise_scale_slider, noise_scale_w_slider],
                                         [tts_output1, tts_output2])
                        gr.Markdown(
                            "Originate from \n\n"
                            "- [https://github.com/kdrkdrkdr]\n\n"
                        )
    app.launch(share=False, inbrowser=True)


if __name__ == "__main__":
    main()
There is a problem with this code: speech synthesis runs, but the speed control has no effect (a quick check of what I mean is sketched below).
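To make the symptom concrete, this is the kind of check I would run: build tts_fn exactly the way main() does, synthesize one sentence at speed 0.5 and at speed 2.0, and compare the durations. This is only a sketch, not part of the demo; it reuses the imports and helper functions from the code above, and the config/checkpoint paths and the test sentence are just placeholders.

# Quick speed check (sketch only): build tts_fn the same way main() does,
# then synthesize one sentence at two speeds and compare the durations.
hps = utils.get_hparams_from_file("config.json")     # placeholder path
model = SynthesizerTrn(
    len(hps.symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    n_speakers=hps.data.n_speakers,
    **hps.model).cuda()
utils.load_checkpoint("G_latest.pth", model, None)   # placeholder path
model.eval()
speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"]

tts_fn = create_tts_fn(model, hps, speaker_ids)
_, (sr, slow) = tts_fn("테스트 문장입니다.", 0, 0.5, 0.667, 0.8)  # speed 0.5 -> length_scale 2.0
_, (sr, fast) = tts_fn("테스트 문장입니다.", 0, 2.0, 0.667, 0.8)  # speed 2.0 -> length_scale 0.5
print(len(slow) / sr, len(fast) / sr)  # if the speed control worked, the first duration should be about 4x the second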
The code below is an AI's attempt to fix this problem.
import argparse
import torch
from torch import no_grad, LongTensor
import commons
import utils
import gradio as gr
from models import SynthesizerTrn
from text import text_to_sequence, _clean_text


def get_text(text, hps):
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm


def create_tts_fn(model, hps, speaker_ids):
    def tts_fn(text, speaker, speed, noise_scale, noise_scale_w):
        speaker_id = speaker_ids[speaker]
        stn_tst = get_text(text, hps)
        with no_grad():
            x_tst = stn_tst.cuda().unsqueeze(0)
            x_tst_lengths = LongTensor([stn_tst.size(0)]).cuda()
            sid = LongTensor([speaker_id]).cuda()
            length_scale = 1.0 / speed  # modified part: use the speed value to compute length_scale
            audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
                                noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
        del stn_tst, x_tst, x_tst_lengths, sid
        return "Success", (hps.data.sampling_rate, audio)


def create_to_phoneme_fn(hps):
    def to_phoneme_fn(text):
        return _clean_text(text, hps.data.text_cleaners) if text != "" else ""
    return to_phoneme_fn


css = """
#advanced-btn {
color: white;
border-color: black;
background: black;
font-size: .7rem !important;
line-height: 19px;
margin-top: 24px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
display: none;
margin-bottom: 20px;
}
"""


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_path", required=True,
                        help="path to config file")
    parser.add_argument("--model_path", required=True,
                        help="path to model file")
    args = parser.parse_args()

    models_tts = []
    name = 'VITS-TTS demo v1.01'
    example = '학습은 잘 마치셨나요? 좋은 결과가 있길 바래요.'
    config_path = args.config_path
    model_path = args.model_path

    hps = utils.get_hparams_from_file(config_path)
    model = SynthesizerTrn(
        len(hps.symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model).cuda()
    utils.load_checkpoint(model_path, model, None)
    model.eval()

    speaker_ids = [sid for sid, name in enumerate(
        hps.speakers) if name != "None"]
    speakers = [name for sid, name in enumerate(
        hps.speakers) if name != "None"]

    models_tts.append((name, speakers, example,
                       hps.symbols, create_tts_fn(model, hps, speaker_ids),
                       create_to_phoneme_fn(hps)))

    app = gr.Blocks(css=css)
    with app:
        gr.Markdown("Gradio VITS-TTS Inference demo v1.01\n\n")
        with gr.Tabs():
            for i, (name, speakers, example, symbols, tts_fn,
                    to_phoneme_fn) in enumerate(models_tts):
                with gr.TabItem(f"VITS-TTS_v1.01"):
                    with gr.Column():
                        gr.Markdown(f"## {name}\n\n")
                        tts_input1 = gr.TextArea(label="Text", value=example,
                                                 elem_id=f"tts-input{i}")
                        tts_input2 = gr.Dropdown(label="Speaker", choices=speakers,
                                                 type="index", value=speakers[0])
                        tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2,
                                               step=0.05)
                        noise_scale_slider = gr.Slider(label="Noise-scale (defaults = 0.667)",
                                                       value=0.667, minimum=0, maximum=1, step=0.01)
                        noise_scale_w_slider = gr.Slider(label="Noise-scale w (defaults = 0.667)",
                                                         value=0.667, minimum=0, maximum=1, step=0.01)
                        tts_output = gr.Output(label="Generated Audio")
                        tts_button = gr.Button(label="Generate")

                        def generate_audio():
                            text = tts_input1.value
                            speaker = tts_input2.value
                            speed = tts_input3.value
                            noise_scale = noise_scale_slider.value
                            noise_scale_w = noise_scale_w_slider.value
                            result, audio = tts_fn(
                                text, speaker, speed, noise_scale, noise_scale_w)
                            if result == "Success":
                                tts_output.clear()
                                tts_output.update(audio)

                        tts_button.set_action(generate_audio)

                        gr.Interface(fn=to_phoneme_fn,
                                     inputs=gr.inputs.Textbox(
                                         label="Enter Text to convert to phonemes"),
                                     outputs="text",
                                     title="Text to phoneme conversion").launch()
                        gr.Interface(fn=tts_fn,
                                     inputs=[gr.inputs.Textbox(label="Text"),
                                             gr.inputs.Radio(
                                                 label="Speaker", choices=speakers),
                                             gr.inputs.Number(
                                                 label="Speed", default=1, min_value=0.1, max_value=2, step=0.1),
                                             gr.inputs.Slider(
                                                 label="Noise Scale", minimum=0, maximum=1, step=0.01, default=0.667),
                                             gr.inputs.Slider(
                                                 label="Noise Scale W", minimum=0, maximum=1, step=0.01, default=0.667)
                                             ],
                                     outputs=gr.outputs.Audio(
                                         label="Generated Audio"),
                                     title="VITS-TTS_v1.01").launch()
The AI-modified code does not run. The error output I get is exactly what I pasted at the top of this post, nothing more. What could the problem be? I have Gradio 3 installed, and the first code definitely ran, apart from the speed control not working. The second, modified code does not run at all. As far as I can tell, nothing was changed other than making the speed control work, so why won't it run?
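For reference, this is my own guess at what a minimal version of the speed change would look like if everything else from the first code (imports, get_text, the gr.Blocks UI, and the tts_submit.click wiring) were kept as-is. It is only a sketch, not something I have confirmed. One thing I did notice while comparing the two versions is that the AI-modified create_tts_fn no longer ends with return tts_fn, while the first version does.

# Sketch: speed-aware create_tts_fn meant to drop into the FIRST code, with the
# rest of the file (imports, helpers, Blocks UI, click wiring) left unchanged.
def create_tts_fn(model, hps, speaker_ids):
    def tts_fn(text, speaker, speed, noise_scale, noise_scale_w):
        speaker_id = speaker_ids[speaker]
        stn_tst = get_text(text, hps)
        with no_grad():
            x_tst = stn_tst.cuda().unsqueeze(0)
            x_tst_lengths = LongTensor([stn_tst.size(0)]).cuda()
            sid = LongTensor([speaker_id]).cuda()
            length_scale = 1.0 / speed  # slider value 2.0 -> length_scale 0.5 -> faster speech
            audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
                                noise_scale_w=noise_scale_w,
                                length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
        del stn_tst, x_tst, x_tst_lengths, sid
        return "Success", (hps.data.sampling_rate, audio)
    return tts_fn  # the AI-modified version appears to drop this return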