Traceback (most recent call last):
  File "videotrans\process\stt_fun.py", line 346, in pipe_asr
  File "transformers\pipelines\__init__.py", line 1027, in pipeline
    framework, model = infer_framework_load_model(
  File "transformers\pipelines\base.py", line 333, in infer_framework_load_model
    raise ValueError(
ValueError: Could not load model D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCTC'>, <class 'transformers.models.auto.modeling_auto.AutoModelForSpeechSeq2Seq'>, <class 'transformers.models.whisper.modeling_whisper.WhisperForConditionalGeneration'>). See the original errors:
while loading with AutoModelForCTC, an error is thrown:
Traceback (most recent call last):
  File "transformers\pipelines\base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "transformers\models\auto\auto_factory.py", line 607, in from_pretrained
    raise ValueError(
ValueError: Unrecognized configuration class for this kind of AutoModel: AutoModelForCTC.
Model type should be one of Data2VecAudioConfig, HubertConfig, MCTCTConfig, Para
......
  File "transformers\models\auto\auto_factory.py", line 607, in from_pretrained
    raise ValueError(
ValueError: Unrecognized configuration class for this kind of AutoModel: AutoModelForCTC.
Model type should be one of Data2VecAudioConfig, HubertConfig, MCTCTConfig, ParakeetCTCConfig, SEWConfig, SEWDConfig, UniSpeechConfig, UniSpeechSatConfig, Wav2Vec2Config, Wav2Vec2BertConfig, Wav2Vec2ConformerConfig, WavLMConfig.
while loading with AutoModelForSpeechSeq2Seq, an error is thrown:
Traceback (most recent call last):
  File "transformers\pipelines\base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "transformers\models\auto\auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
  File "transformers\modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "transformers\modeling_utils.py", line 4900, in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
  File "transformers\modeling_utils.py", line 989, in _get_resolved_checkpoint_files
    raise OSError(
OSError: Error no file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt.index or flax_model.msgpack found in directory D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2.
During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "transformers\pipelines\base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "transformers\models\auto\auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
  File "transformers\modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "transformers\modeling_utils.py", line 4900, in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
  File "transformers\modeling_utils.py", line 989, in _get_resolved_checkpoint_files
    raise OSError(
OSError: Error no file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt.index or flax_model.msgpack found in directory D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2.
while loading with WhisperForConditionalGeneration, an error is thrown:
Traceback (most recent call last):
  File "transformers\pipelines\base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "transformers\modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "transformers\modeling_utils.py", line 4900, in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
  File "transformers\modeling_utils.py", line 989, in _get_resolved_checkpoint_files
    raise OSError(
OSError: Error no file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt.index or flax_model.msgpack found in directory D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2.
During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "transformers\pipelines\base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "transformers\modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "transformers\modeling_utils.py", line 4900, in from_pretrained
    checkpoint_files, sharded_metadata = _get_resolved_checkpoint_files(
  File "transformers\modeling_utils.py", line 989, in _get_resolved_checkpoint_files
    raise OSError(
OSError: Error no file named pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt.index or flax_model.msgpack found in directory D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2.
=
system:Windows-10-10.0.22631-SP0
version:v3.96
frozen:True
language:zh
root_dir:D:/pyVideoTrans/V3.96
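
Note: every OSError above is the same failure: from_pretrained() found no weight file (pytorch_model.bin, model.safetensors, tf_model.h5, model.ckpt.index or flax_model.msgpack) directly inside D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2. A folder named models--openai--whisper-large-v2 is normally a Hugging Face cache root, which keeps config.json and the weights one level deeper under snapshots/<commit-hash>/, so either the download is incomplete or the path handed to pipeline() points one level too high. The snippet below is a minimal sketch under that assumption, not pyVideoTrans's own loading code; only the path and the automatic-speech-recognition task come from the traceback, everything else is illustrative.

    # Sketch: locate the directory that actually contains a weight file,
    # then point the transformers pipeline at it.
    from pathlib import Path
    from transformers import pipeline

    # Path taken from the traceback above.
    model_dir = Path("D:/pyVideoTrans/V3.96/models/models--openai--whisper-large-v2")

    # from_pretrained() only looks for these names directly inside the directory
    # it is given; a HF cache folder keeps them under snapshots/<commit-hash>/.
    weight_names = ("pytorch_model.bin", "model.safetensors", "tf_model.h5",
                    "model.ckpt.index", "flax_model.msgpack")
    candidates = [model_dir, *model_dir.glob("snapshots/*")]
    usable = [d for d in candidates
              if any((d / name).is_file() for name in weight_names)]

    if not usable:
        print("no weight file found under", model_dir, "- re-download the model")
    else:
        # Load from the directory that really holds config.json + weights.
        asr = pipeline("automatic-speech-recognition", model=str(usable[0]))
        print("loaded from", usable[0])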