Emeritus-21 committed
Commit f3b428e · verified · 1 Parent(s): 60cad4d

Update app.py

Files changed (1)
app.py +4 -18
app.py CHANGED
@@ -69,26 +69,12 @@ def warmup(progress=gr.Progress(track_tqdm=True)):
 def _build_inputs(processor, tokenizer, image: Image.Image, prompt: str):
     messages = [{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": prompt}]}]
 
-    # We explicitly set max_length and truncation here to resolve the token mismatch error.
-    # A value of 2048 is safe, as an image takes up ~1024 tokens.
-    max_len_val = 2048
-
     if tokenizer and hasattr(tokenizer, "apply_chat_template"):
         chat_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-        return processor(
-            text=[chat_prompt],
-            images=[image],
-            return_tensors="pt",
-            max_length=max_len_val,
-            truncation=True
-        )
-    return processor(
-        text=[prompt],
-        images=[image],
-        return_tensors="pt",
-        max_length=max_len_val,
-        truncation=True
-    )
+        return processor(text=[chat_prompt], images=[image], return_tensors="pt")
+
+    return processor(text=[prompt], images=[image], return_tensors="pt")
+
 
 def _decode_text(model, processor, tokenizer, output_ids, prompt: str):
     try:
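
For context, the sketch below shows roughly what `_build_inputs` looks like after this commit, and why dropping `max_length`/`truncation` is plausible: multimodal processors expand each image into many placeholder tokens, and truncating the combined sequence can clip some of those placeholders, producing the very token-count mismatch the removed comment was trying to avoid. This is an illustrative sketch, not the Space's actual app.py; the checkpoint name and the `__main__` demo block are assumptions.

```python
# Hedged sketch of the post-commit _build_inputs, assuming a Hugging Face
# vision-language processor whose tokenizer exposes apply_chat_template.
from PIL import Image
from transformers import AutoProcessor

def _build_inputs(processor, tokenizer, image: Image.Image, prompt: str):
    messages = [{"role": "user", "content": [
        {"type": "image", "image": image},
        {"type": "text", "text": prompt},
    ]}]
    if tokenizer and hasattr(tokenizer, "apply_chat_template"):
        chat_prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        # No max_length/truncation: the processor emits the full sequence,
        # so image placeholder tokens are never clipped away from the
        # image features they must pair with.
        return processor(text=[chat_prompt], images=[image], return_tensors="pt")
    # Fallback when no chat template is available: pass the raw prompt.
    return processor(text=[prompt], images=[image], return_tensors="pt")

if __name__ == "__main__":
    # Placeholder checkpoint for illustration; the Space may use a different model.
    processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
    tokenizer = getattr(processor, "tokenizer", None)
    inputs = _build_inputs(processor, tokenizer,
                           Image.new("RGB", (448, 448)), "Describe this image.")
    print({k: tuple(v.shape) for k, v in inputs.items()})
```

If overly long prompts ever become a problem again, a safer pattern than truncating the combined multimodal sequence would presumably be to trim the text portion before applying the chat template, leaving the image placeholders intact.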