starting inference...
Traceback (most recent call last):
File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/gradio/queueing.py", line 745, in process_events
response = await route_utils.call_process_api(File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/gradio/route_utils.py", line 349, in call_process_api
output = await app.get_blocks().process_api(File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/gradio/blocks.py", line 2123, in process_api
result = await self.call_function(File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/gradio/blocks.py", line 1630, in call_function
prediction = await anyio.to_thread.run_sync( # type: ignoreFile "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2476, in run_sync_in_worker_thread
return await futureFile "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 967, in run
result = context.run(func, *args)File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/gradio/utils.py", line 915, in wrapper
response = f(*args, **kwargs)File "/Volumes/Ventoy/video/index-tts/webui.py", line 170, in gen_single
output = tts.infer(spk_audio_prompt=prompt, text=text,File "/Volumes/Ventoy/video/index-tts/indextts/infer_v2.py", line 372, in infer
return list(self.infer_generator(File "/Volumes/Ventoy/video/index-tts/indextts/infer_v2.py", line 437, in infer_generator
audio_16k = torchaudio.transforms.Resample(sr, 16000)(audio)File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1784, in _call_impl
return forward_call(*args, **kwargs)File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/torchaudio/transforms/_transforms.py", line 980, in forward
return _apply_sinc_resample_kernel(waveform, self.orig_freq, self.new_freq, self.gcd, self.kernel, self.width)File "/Volumes/Ventoy/video/index-tts/.venv/lib/python3.10/site-packages/torchaudio/functional/functional.py", line 1463, in _apply_sinc_resample_kernel
waveform = waveform.view(-1, shape[-1])RuntimeError: cannot reshape tensor of 0 elements into shape [-1, 0] because the unspecified dimension size -1 can be any value and is ambiguous
Emo control mode:0,weight:0.65,vec:None
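
Note on the error above: the RuntimeError is raised while resampling a waveform that contains zero samples. torchaudio's sinc resampler calls waveform.view(-1, shape[-1]), which is ambiguous when the last dimension is 0, so the most likely cause is that the speaker prompt audio decoded to an empty tensor. The following is a minimal sketch that reproduces the same error, assuming an empty prompt waveform (the sample rate and tensor shape here are illustrative, not taken from the log):

# Minimal repro sketch (assumption: the prompt audio decodes to an empty tensor).
import torch
import torchaudio

sr = 22050                 # hypothetical original sample rate of the prompt file
audio = torch.zeros(1, 0)  # zero-sample waveform, e.g. an empty or corrupt reference clip

# Raises the same RuntimeError as infer_generator() in the traceback, because the
# sinc resample kernel attempts waveform.view(-1, 0) on a tensor with 0 elements.
audio_16k = torchaudio.transforms.Resample(sr, 16000)(audio)

A guard such as checking audio.numel() > 0 (or validating the uploaded reference file) before the Resample call would surface a clearer error message than the reshape failure.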