ariG23498 HF Staff committed on
Commit
3adcf93
·
verified ·
1 Parent(s): 83be0f1

Upload Qwen_Qwen3.5-9B-Base_1.txt with huggingface_hub

Browse files
Files changed (1) hide show
  1. Qwen_Qwen3.5-9B-Base_1.txt +44 -0
Qwen_Qwen3.5-9B-Base_1.txt ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ```CODE:
2
+ # Load model directly
3
+ from transformers import AutoProcessor, AutoModelForImageTextToText
4
+
5
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen3.5-9B-Base")
6
+ model = AutoModelForImageTextToText.from_pretrained("Qwen/Qwen3.5-9B-Base")
7
+ messages = [
8
+ {
9
+ "role": "user",
10
+ "content": [
11
+ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
12
+ {"type": "text", "text": "What animal is on the candy?"}
13
+ ]
14
+ },
15
+ ]
16
+ inputs = processor.apply_chat_template(
17
+ messages,
18
+ add_generation_prompt=True,
19
+ tokenize=True,
20
+ return_dict=True,
21
+ return_tensors="pt",
22
+ ).to(model.device)
23
+
24
+ outputs = model.generate(**inputs, max_new_tokens=40)
25
+ print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
26
+ ```
27
+
28
+ ERROR:
29
+ Traceback (most recent call last):
30
+ File "/tmp/Qwen_Qwen3.5-9B-Base_1Ll1VoC.py", line 37, in <module>
31
+ inputs = processor.apply_chat_template(
32
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
33
+ messages,
34
+ ^^^^^^^^^
35
+ ...<3 lines>...
36
+ return_tensors="pt",
37
+ ^^^^^^^^^^^^^^^^^^^^
38
+ ).to(model.device)
39
+ ^
40
+ File "/tmp/.cache/uv/environments-v2/4ce060491032d9c1/lib/python3.13/site-packages/transformers/processing_utils.py", line 1666, in apply_chat_template
41
+ raise ValueError(
42
+ "Cannot use apply_chat_template because this processor does not have a chat template."
43
+ )
44
+ ValueError: Cannot use apply_chat_template because this processor does not have a chat template.