                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/e8b6185b99c36290/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model Salesforce/CoDA-v0-Instruct with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>,). See the original errors:

while loading with AutoModelForCausalLM, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/e8b6185b99c36290/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/e8b6185b99c36290/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.Salesforce.CoDA_hyphen_v0_hyphen_Instruct.f2d40ce1282766e2c9fbf5ca62c611817164a246.model_config.CoDAConfig'> for this kind of AutoModel: AutoModelForCausalLM.
Model type should be one of ApertusConfig, ArceeConfig, AriaTextConfig, BambaConfig, BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BitNetConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, BltConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CohereConfig, ...

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/e8b6185b99c36290/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/e8b6185b99c36290/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.Salesforce.CoDA_hyphen_v0_hyphen_Instruct.f2d40ce1282766e2c9fbf5ca62c611817164a246.model_config.CoDAConfig'> for this kind of AutoModel: AutoModelForCausalLM.
Model type should be one of ApertusConfig, ArceeConfig, AriaTextConfig, BambaConfig, BartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BioGptConfig, BitNetConfig, BlenderbotConfig, BlenderbotSmallConfig, BloomConfig, BltConfig, CamembertConfig, LlamaConfig, CodeGenConfig, CohereConfig, ...
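
The pipeline loader fails twice here (auto dtype, then the fp32 retry) for the same underlying reason: the repo's custom CoDAConfig is not in transformers' static AutoModelForCausalLM registry, so no retry can succeed. The usual fallback is to bypass pipeline() and call an Auto class directly with trust_remote_code=True, which is exactly what the next traceback attempts; a minimal sketch of that call:

from transformers import AutoModel

# Load via the hub repo's own modeling code instead of the static
# AutoModelForCausalLM mapping. This mirrors the call the next traceback makes.
model = AutoModel.from_pretrained(
    "Salesforce/CoDA-v0-Instruct",
    trust_remote_code=True,  # required: CoDAConfig/CoDAModel live in the repo
    torch_dtype="auto",
)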
Traceback (most recent call last):
  File "/tmp/Salesforce_CoDA-v0-Instruct_1xKCm9o.py", line 15, in <module>
    model = AutoModel.from_pretrained("Salesforce/CoDA-v0-Instruct", trust_remote_code=True, torch_dtype="auto")
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 597, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4971, in from_pretrained
    model = cls(config, *model_args, **model_kwargs)
  File "/tmp/.cache/huggingface/modules/transformers_modules/Salesforce/CoDA_hyphen_v0_hyphen_Instruct/f2d40ce1282766e2c9fbf5ca62c611817164a246/modeling_coda.py", line 372, in __init__
    self.model = CoDAModel(config)
                 ~~~~~~~~~^^^^^^^^
  File "/tmp/.cache/huggingface/modules/transformers_modules/Salesforce/CoDA_hyphen_v0_hyphen_Instruct/f2d40ce1282766e2c9fbf5ca62c611817164a246/modeling_coda.py", line 268, in __init__
    super().__init__(config=config)
    ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 2076, in __init__
    self.config._attn_implementation_internal = self._check_and_adjust_attn_implementation(
                                                ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        self.config._attn_implementation, is_init_check=True
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 2686, in _check_and_adjust_attn_implementation
    applicable_attn_implementation = self.get_correct_attn_implementation(
        applicable_attn_implementation, is_init_check
    )
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 2725, in get_correct_attn_implementation
    raise e
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 2722, in get_correct_attn_implementation
    self._sdpa_can_dispatch(is_init_check)
    ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/f5cacc24669b9318/lib/python3.13/site-packages/transformers/modeling_utils.py", line 2574, in _sdpa_can_dispatch
    raise ValueError(
        ...<3 lines>...
    )
ValueError: CoDAModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub rep...
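
This time the remote CoDAModel constructs far enough to reach transformers' attention-dispatch check, which rejects the default SDPA path because the class does not advertise SDPA support. A plausible but untested workaround, assuming the remote CoDA code implements the eager attention path, is to pin the attention implementation explicitly so the SDPA capability check is never consulted:

from transformers import AutoModel

# Sketch only: force eager attention to sidestep _sdpa_can_dispatch().
# Whether CoDA's remote code accepts this is an assumption, not verified.
model = AutoModel.from_pretrained(
    "Salesforce/CoDA-v0-Instruct",
    trust_remote_code=True,
    torch_dtype="auto",
    attn_implementation="eager",
)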
No suitable GPU found for Skywork/Skywork-R1V3-38B | 92.95 GB VRAM requirement
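
For scale, 92.95 GB is roughly what a 38B-parameter checkpoint implies at 2 bytes per parameter (bf16/fp16) once runtime overhead is included. A back-of-envelope estimate, using a rough heuristic of ours rather than whatever formula produced the figure above:

def estimate_vram_gb(n_params: float, bytes_per_param: int = 2, overhead: float = 1.2) -> float:
    # Weight bytes at the checkpoint dtype, padded ~20% for activations,
    # KV cache, and CUDA context. The 1.2 factor is a guess, not measured.
    return n_params * bytes_per_param * overhead / 1e9

print(estimate_vram_gb(38e9))  # ~91.2 GB, in line with the 92.95 GB reported above

Fitting such a model on a single 80 GB card would therefore require quantization or offloading, not merely a different GPU.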
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_http.py", line 409, in hf_raise_for_status
    response.raise_for_status()
    ~~~~~~~~~~~~~~~~~~~~~~~~~^^
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/requests/models.py", line 1026, in raise_for_status
    raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/X-Omni/X-Omni-En/resolve/main/model_index.json

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/tmp/X-Omni_X-Omni-En_03hp8xD.py", line 18, in <module>
    pipe = DiffusionPipeline.from_pretrained("X-Omni/X-Omni-En")
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/diffusers/pipelines/pipeline_utils.py", line 833, in from_pretrained