    ...<8 lines>...
        repo_type=repo_type,
    )
  File "/tmp/.cache/uv/environments-v2/899a4ca09b916084/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 432, in get_cached_module_file
    get_cached_module_file(
    ~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    ...<8 lines>...
        _commit_hash=commit_hash,
        ^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/899a4ca09b916084/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 392, in get_cached_module_file
    modules_needed = check_imports(resolved_module_file)
  File "/tmp/.cache/uv/environments-v2/899a4ca09b916084/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 224, in check_imports
    raise ImportError(
    ...<2 lines>...
    )
ImportError: This modeling file requires the following packages that were not found in your environment: einops, timm. Run `pip install einops timm`
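This ImportError is raised before any weights are fetched: transformers' check_imports step scans the top-level imports of the Hub repo's custom modeling file and aborts while any of them are missing, here einops and timm. A minimal pre-flight sketch of that same check, using only the package names quoted in the error (nothing else about the repo is assumed):

    import importlib.util

    # Packages named by check_imports in the traceback above.
    required = ["einops", "timm"]
    missing = [name for name in required if importlib.util.find_spec(name) is None]
    if missing:
        # Same remedy the error message suggests, surfaced before the load attempt.
        raise SystemExit("install missing packages first: pip install " + " ".join(missing))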
No suitable GPU found for baidu/Qianfan-VL-70B | 173.95 GB VRAM requirement
Traceback (most recent call last):
  File "/tmp/baidu_Qianfan-VL-8B_0mFVLxP.py", line 16, in <module>
    pipe = pipeline("image-text-to-text", model="baidu/Qianfan-VL-8B", trust_remote_code=True)
  File "/tmp/.cache/uv/environments-v2/8f413d6cb82e6358/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1028, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/8f413d6cb82e6358/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model baidu/Qianfan-VL-8B with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>,). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/8f413d6cb82e6358/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/8f413d6cb82e6358/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
    ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.baidu.Qianfan-VL-8B.70da8ff21d2fb3a568ea313120be56c0ade45457.configuration_qianfanvl_chat.QianfanVLChatConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idef...

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/8f413d6cb82e6358/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/8f413d6cb82e6358/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
    ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.baidu.Qianfan-VL-8B.70da8ff21d2fb3a568ea313120be56c0ade45457.configuration_qianfanvl_chat.QianfanVLChatConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idef...
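This second failure is independent of the missing packages: pipeline("image-text-to-text", ...) resolves to AutoModelForImageTextToText, whose mapping only knows the config classes listed above, and the repo's custom QianfanVLChatConfig is not among them even with trust_remote_code=True. A sketch of the obvious workaround, loading through AutoModel so the repo's own auto_map remote code picks the concrete model class instead of the pipeline's fixed mapping; whether AutoModel resolves to the intended chat class is an assumption about the repo's config.json:

    from transformers import AutoModel

    # Bypass pipeline()/AutoModelForImageTextToText; the Hub repo's
    # trust_remote_code auto_map chooses the concrete model class.
    model = AutoModel.from_pretrained(
        "baidu/Qianfan-VL-8B",
        trust_remote_code=True,
        torch_dtype="auto",
    )

As the next traceback shows, this route was in fact tried and still failed in this environment, for the earlier reason: einops and timm are not installed.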
Traceback (most recent call last):
  File "/tmp/baidu_Qianfan-VL-8B_1BCwleH.py", line 15, in <module>
    model = AutoModel.from_pretrained("baidu/Qianfan-VL-8B", trust_remote_code=True, torch_dtype="auto")
  File "/tmp/.cache/uv/environments-v2/65c87e3bb5bf1ca1/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 586, in from_pretrained
    model_class = get_class_from_dynamic_module(
        class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
    )
  File "/tmp/.cache/uv/environments-v2/65c87e3bb5bf1ca1/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 569, in get_class_from_dynamic_module
    final_module = get_cached_module_file(
        repo_id,
    ...<8 lines>...
        repo_type=repo_type,
    )
  File "/tmp/.cache/uv/environments-v2/65c87e3bb5bf1ca1/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 432, in get_cached_module_file
    get_cached_module_file(
    ~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    ...<8 lines>...
        _commit_hash=commit_hash,
        ^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/65c87e3bb5bf1ca1/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 392, in get_cached_module_file
    modules_needed = check_imports(resolved_module_file)
  File "/tmp/.cache/uv/environments-v2/65c87e3bb5bf1ca1/lib/python3.13/site-packages/transformers/dynamic_module_utils.py", line 224, in check_imports
    raise ImportError(
    ...<2 lines>...
    )
ImportError: This modeling file requires the following packages that were not found in your environment: einops, timm. Run `pip install einops timm`
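Taken together, the two tracebacks for baidu/Qianfan-VL-8B suggest a plausible, but untested-here, loading path: install the declared dependencies first, then load via AutoModel with trust_remote_code rather than through the image-text-to-text pipeline. The model id, dtype, and flags below are copied from the failing script; the tokenizer line and the assumption that the load then succeeds are additions:

    # pip install einops timm    (satisfies check_imports)
    from transformers import AutoModel, AutoTokenizer

    model = AutoModel.from_pretrained(
        "baidu/Qianfan-VL-8B",
        trust_remote_code=True,
        torch_dtype="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained("baidu/Qianfan-VL-8B", trust_remote_code=True)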
No suitable GPU found for deepcogito/cogito-v2-preview-deepseek-671B-MoE | 1624.83 GB VRAM requirement
No suitable GPU found for deepseek-ai/DeepSeek-R1-0528 | 1657.55 GB VRAM requirement
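The "No suitable GPU found" entries report estimated VRAM needs, and the figures are roughly what 16-bit weights imply: about 2 bytes per parameter plus some headroom. A back-of-the-envelope check (the 20% headroom factor is an assumption, not something stated in the log):

    def est_vram_gb(n_params_billion, bytes_per_param=2, overhead=1.2):
        """Rough estimate: 16-bit weights plus an assumed headroom factor."""
        return n_params_billion * bytes_per_param * overhead

    print(est_vram_gb(70))   # ~168 GB, same ballpark as the 173.95 GB reported for Qianfan-VL-70B
    print(est_vram_gb(671))  # ~1610 GB, vs. ~1625-1658 GB reported for the 671B MoE checkpoints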