    cached_folder = cls.download(
        pretrained_model_name_or_path,
        ...<14 lines>...
        **kwargs,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/diffusers/pipelines/pipeline_utils.py", line 1485, in download
    config_file = hf_hub_download(
        pretrained_model_name,
        ...<5 lines>...
        token=token,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1010, in hf_hub_download
    return _hf_hub_download_to_cache_dir(
        # Destination
        ...<14 lines>...
        force_download=force_download,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1073, in _hf_hub_download_to_cache_dir
    (url_to_download, etag, commit_hash, expected_size, xet_file_data, head_call_error) = _get_metadata_or_catch_error(
                                                                                          ~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        repo_id=repo_id,
        ^^^^^^^^^^^^^^^^
        ...<10 lines>...
        relative_filename=relative_filename,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1546, in _get_metadata_or_catch_error
    metadata = get_hf_file_metadata(
        url=url, proxies=proxies, timeout=etag_timeout, headers=headers, token=token, endpoint=endpoint
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 1463, in get_hf_file_metadata
    r = _request_wrapper(
        method="HEAD",
        ...<5 lines>...
        timeout=timeout,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 286, in _request_wrapper
    response = _request_wrapper(
        method=method,
        ...<2 lines>...
        **params,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 310, in _request_wrapper
    hf_raise_for_status(response)
    ~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_http.py", line 420, in hf_raise_for_status
    raise _format(EntryNotFoundError, message, response) from e
huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-6895ca0d-3fda83bb49f04b595769150f;9291fb74-2aac-4bee-abda-9269c198532b)
Entry Not Found for url: https://huggingface.co/X-Omni/X-Omni-En/resolve/main/model_index.json.
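
The 404 above is not a transient network error: `DiffusionPipeline.download` starts by fetching `model_index.json`, and the `X-Omni/X-Omni-En` repo simply does not contain that file, i.e. it is not laid out as a diffusers pipeline. A minimal pre-flight sketch using the standard `huggingface_hub` helpers `file_exists` and `list_repo_files` (the fallback branch is an assumption; the actual loading recipe would come from the model card):

```python
from huggingface_hub import file_exists, list_repo_files

repo_id = "X-Omni/X-Omni-En"  # repo from the failing download above

# DiffusionPipeline.from_pretrained() needs model_index.json at the repo root;
# the 404 above shows this repo does not ship one, so probe before loading.
if file_exists(repo_id, "model_index.json"):
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(repo_id)
else:
    # Not packaged as a diffusers pipeline: inspect the repo layout and follow
    # the loading code documented on the model card instead.
    print(list_repo_files(repo_id))
```
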
Traceback (most recent call last):
  File "/tmp/YannQi_R-4B_0eAMXFw.py", line 13, in <module>
    pipe = pipeline("image-text-to-text", model="YannQi/R-4B", trust_remote_code=True)
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1028, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model YannQi/R-4B with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>,). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.YannQi.R-4B.0fbbf22db6867d9ac260d8181ac655fd915c9415.configuration_r.RConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.YannQi.R-4B.0fbbf22db6867d9ac260d8181ac655fd915c9415.configuration_r.RConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
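
Here the failure is not a download problem: `YannQi/R-4B` ships a custom `RConfig` via `trust_remote_code`, and `pipeline("image-text-to-text", ...)` can only dispatch to config classes registered in the `AutoModelForImageTextToText` mapping, which a remote-code config is not. A sketch of the usual workaround, assuming (unverified here) that the repo's `auto_map` exposes `AutoModel` and `AutoProcessor` entry points:

```python
from transformers import AutoModel, AutoProcessor

model_id = "YannQi/R-4B"  # repo from the failing pipeline() call above

# Bypass the task-specific pipeline and load the remote-code classes directly.
# trust_remote_code=True executes the modeling/configuration files shipped in
# the repo, so this assumes the checkpoint's auto_map registers both classes.
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModel.from_pretrained(model_id, trust_remote_code=True)
```

Alternatively, the custom classes could be registered with `AutoModelForImageTextToText.register(config_class, model_class)` so that `pipeline()` can dispatch to them; either route avoids the unregistered-config lookup that raises above.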