ariG23498 HF Staff committed on
Commit
b8d610b
·
verified ·
1 Parent(s): 3667fe7

Upload Qwen_Qwen3-VL-2B-Instruct_0.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. Qwen_Qwen3-VL-2B-Instruct_0.py +48 -2
Qwen_Qwen3-VL-2B-Instruct_0.py CHANGED
@@ -11,7 +11,30 @@
11
  # ///
12
 
13
  try:
14
- !pip install -U transformers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  with open('Qwen_Qwen3-VL-2B-Instruct_0.txt', 'w', encoding='utf-8') as f:
16
  f.write('Everything was good in Qwen_Qwen3-VL-2B-Instruct_0.txt')
17
  except Exception as e:
@@ -26,7 +49,30 @@ except Exception as e:
26
  with open('Qwen_Qwen3-VL-2B-Instruct_0.txt', 'a', encoding='utf-8') as f:
27
  import traceback
28
  f.write('''```CODE:
29
- !pip install -U transformers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  ```
31
 
32
  ERROR:
 
11
  # ///
12
 
13
  try:
14
+ # Load model directly
15
+ from transformers import AutoProcessor, AutoModelForVision2Seq
16
+
17
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Instruct")
18
+ model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-2B-Instruct")
19
+ messages = [
20
+ {
21
+ "role": "user",
22
+ "content": [
23
+ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
24
+ {"type": "text", "text": "What animal is on the candy?"}
25
+ ]
26
+ },
27
+ ]
28
+ inputs = processor.apply_chat_template(
29
+ messages,
30
+ add_generation_prompt=True,
31
+ tokenize=True,
32
+ return_dict=True,
33
+ return_tensors="pt",
34
+ ).to(model.device)
35
+
36
+ outputs = model.generate(**inputs, max_new_tokens=40)
37
+ print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
38
  with open('Qwen_Qwen3-VL-2B-Instruct_0.txt', 'w', encoding='utf-8') as f:
39
  f.write('Everything was good in Qwen_Qwen3-VL-2B-Instruct_0.txt')
40
  except Exception as e:
 
49
  with open('Qwen_Qwen3-VL-2B-Instruct_0.txt', 'a', encoding='utf-8') as f:
50
  import traceback
51
  f.write('''```CODE:
52
+ # Load model directly
53
+ from transformers import AutoProcessor, AutoModelForVision2Seq
54
+
55
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Instruct")
56
+ model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-2B-Instruct")
57
+ messages = [
58
+ {
59
+ "role": "user",
60
+ "content": [
61
+ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
62
+ {"type": "text", "text": "What animal is on the candy?"}
63
+ ]
64
+ },
65
+ ]
66
+ inputs = processor.apply_chat_template(
67
+ messages,
68
+ add_generation_prompt=True,
69
+ tokenize=True,
70
+ return_dict=True,
71
+ return_tensors="pt",
72
+ ).to(model.device)
73
+
74
+ outputs = model.generate(**inputs, max_new_tokens=40)
75
+ print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
76
  ```
77
 
78
  ERROR: