#4497 TaskCfgSTT(is_cuda=True, uuid='57da8ab8e2', cache_folder='J:/pyvideotrans-v3.99-0508/tmp/9564/57da8ab8e2', target_dir='J

103.197* Posted at: 2 hours ago 👁6

语音识别阶段出错 [openai-whisper(本地)] Traceback (most recent call last):
File "videotrans\process\stt_fun.py", line 109, in openai_whisper
File "whisper\transcribe.py", line 295, in transcribe
File "whisper\transcribe.py", line 201, in decode_with_fallback
File "torch\utils\_contextlib.py", line 116, in decorate_context

return func(*args, **kwargs)

File "whisper\decoding.py", line 824, in decode
File "torch\utils\_contextlib.py", line 116, in decorate_context

return func(*args, **kwargs)

File "whisper\decoding.py", line 737, in run
File "whisper\decoding.py", line 703, in _main_loop
File "whisper\decoding.py", line 283, in update
File "torch\distributions\categorical.py", line 73, in __init__

super().__init__(batch_shape, validate_args=validate_args)

File "torch\distributions\distribution.py", line 72, in __init__

raise ValueError(

ValueError: Expected parameter logits (Tensor of shape (1, 51866)) of distribution Categorical(logits: torch.Size([1, 51866])) to satisfy the constraint IndependentConstraint(Real(), 1), but found invalid values:
tensor([[nan, nan, nan, ..., nan, nan, nan]], device='cuda:0')

Traceback (most recent call last):
File "videotrans\task\job.py", line 105, in run
File "videotrans\task\_speech2text.py", line 191, in recogn
File "videotrans\recognition\__init__.py", line 293, in run
File "videotrans\recognition\_base.py", line 143, in run
File "videotrans\recognition\_overall.py", line 31, in _exec
File "videotrans\recognition\_overall.py", line 74, in _openai
File "videotrans\configure\_base.py", line 289, in _new_process
RuntimeError: Traceback (most recent call last):
File "videotrans\process\stt_fun.py", line 109, in openai_whisper
File "whisper\transcribe.py", line 295, in transcribe
File "whisper\transcribe.py", line 201, in decode_with_fallback
File "torch\utils\_contextlib.py", line 116, in decorate_context

return func(*args, **kwargs)

File "whisper\decoding.py", line 824, in decode
File "torch\utils\_contextlib.py", line 116, in decorate_context

return func(*args, **kwargs)

File "whisper\decoding.py", line 737, in run
File "whisper\decoding.py", line 703, in _main_loop
File "whisper\decoding.py", line 283, in update
File "torch\distributions\categorical.py", line 73, in __init__

super().__init__(batch_shape, validate_args=validate_args)

File "torch\distributions\distribution.py", line 72, in __init__

raise ValueError(

ValueError: Expected parameter logits (Tensor of shape (1, 51866)) of distribution Categorical(logits: torch.Size([1, 51866])) to satisfy the constraint IndependentConstraint(Real(), 1), but found invalid values:
tensor([[nan, nan, nan, ..., nan, nan, nan]], device='cuda:0')
TaskCfgSTT(is_cuda=True, uuid='57da8ab8e2', cache_folder='J:/pyvideotrans-v3.99-0508/tmp/9564/57da8ab8e2', target_dir='J:/pyvideotrans-v3.99-0508/output/recogn', source_language=None, source_language_code=None, source_sub=None, source_wav=None, source_wav_output=None, target_language=None, target_language_code=None, target_sub='J:/pyvideotrans-v3.99-0508/output/recogn/第一次直播 (2).srt', target_wav=None, target_wav_output=None, name='J:/第一次直播 (2).mp4', noextname='第一次直播 (2)', basename='第一次直播 (2).mp4', ext='mp4', dirname='J:/', shound_del_name=None, detect_language='zh-cn', recogn_type=1, model_name='large-v3', shibie_audio='J:/pyvideotrans-v3.99-0508/tmp/9564/57da8ab8e2/第一次直播 (2)-1778356683.058531.wav', remove_noise=False, enable_diariz=False, nums_diariz=0, rephrase=0, fix_punc=False)


=
system:Windows-10-10.0.19041-SP0
version:v3.99
frozen:True
language:zh
root_dir:J:/pyvideotrans-v3.99-0508

1 Replies

这是由于GPU计算不稳定或显存不足导致的,模型输出产生了无效值(NaN)。

解决方案

  1. 检查CUDA/cuDNN版本:确保已安装CUDA 12.8+和cuDNN 9.11+。在CMD运行 nvcc -V 查看CUDA版本,nvidia-smi 查看驱动支持的最高CUDA版本。
  2. 临时关闭CUDA加速:在主界面取消勾选“CUDA加速”,改用CPU运行,若问题消失则确认是GPU环境问题。
  3. 降低显卡负载

    • 菜单栏 → 工具/选项 → 高级选项 → 语音识别参数中:

      • CUDA数据类型改为 float16 或 int8
      • beam_size改为 1,best_of改为 1
      • 启用上下文感知改为 false
    • 或者换用更小的模型(medium 或 small),large-v3最低需要10GB显存。
  4. 改用faster-whisper:此渠道对显存管理更好,可在主界面语音识别渠道中选择faster-whisper(本地),模型选large-v3 或 medium

请查阅相关文档:

Post Your Reply
Open source and free maintenance is not easy. If this project is helpful to you, please consider making a small donation to help the project continue to maintain and update.

Related resource