Upload YannQi_R-4B_0.txt with huggingface_hub
YannQi_R-4B_0.txt
CHANGED (+2 -2)
@@ -1,5 +1,5 @@
 Traceback (most recent call last):
-  File "/tmp/YannQi_R-
+  File "/tmp/YannQi_R-4B_0NKtXIB.py", line 13, in <module>
     pipe = pipeline("image-text-to-text", model="YannQi/R-4B", trust_remote_code=True)
   File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1028, in pipeline
     framework, model = infer_framework_load_model(
@@ -25,7 +25,7 @@ Traceback (most recent call last):
     raise ValueError(
     ...<2 lines>...
     )
-ValueError: Unrecognized configuration class <class 'transformers_modules.YannQi.R-4B.
+ValueError: Unrecognized configuration class <class 'transformers_modules.YannQi.R-4B.0fbbf22db6867d9ac260d8181ac655fd915c9415.configuration_r.RConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
 Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
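
Note: the ValueError above means the repo's custom RConfig (pulled in via trust_remote_code=True) is not registered with AutoModelForImageTextToText, so the image-text-to-text pipeline cannot resolve a model class for it. Below is a minimal workaround sketch that bypasses the pipeline; it assumes the repo's remote code exposes the model through plain AutoModel and a chat-style AutoProcessor. The entry points, prompt format, and image URL are assumptions, not taken from this log; check the model card for the actual usage.

# Sketch: load YannQi/R-4B without the pipeline wrapper.
# Assumption: the repo's auto_map routes its custom classes through
# AutoModel / AutoProcessor (not confirmed by this log).
from transformers import AutoModel, AutoProcessor

model_id = "YannQi/R-4B"
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_id,
    torch_dtype="auto",
    trust_remote_code=True,
)

# Hypothetical chat-style request; the image URL is a placeholder.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/cat.png"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
)
output_ids = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the prompt.
new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
print(processor.decode(new_tokens, skip_special_tokens=True))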