Update app.py
app.py CHANGED

@@ -15,7 +15,7 @@ os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
 DEFAULT_CKPT_PATH = 'qwen/Qwen-VL-Chat'
 REVISION = 'v1.0.4'
 BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
-PUNCTUATION = "
+PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
 uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(Path(tempfile.gettempdir()) / "gradio")
 tokenizer = None
 model = None
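The removed line had left PUNCTUATION as an unterminated string literal, a syntax error; the replacement restores the full set of ASCII and CJK punctuation characters. As an illustration of how such a constant is usually consumed, str.rstrip treats it as a character set. The helper below is a sketch, not part of app.py or this commit:

# Illustrative sketch, not from the commit.
def trim_trailing_punctuation(text: str) -> str:
    # rstrip interprets PUNCTUATION as a set of characters and removes
    # any trailing run of them from the string.
    return text.rstrip(PUNCTUATION)

assert trim_trailing_punctuation("框出图中的狗。") == "框出图中的狗"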
@@ -130,25 +130,30 @@ def add_file(history, task_history, file):
     task_history = task_history + [((file_path,), None)]
     return history, task_history
 
-
-def predict(_chatbot, task_history) -> list:
+def predict(_chatbot, task_history) -> tuple:
     print("predict called")
-
-
-
-
+    chat_query, chat_response = _chatbot[-1]
+
+    # Check if the chat query is a tuple (indicating an image), and if so, extract the image path
+    if isinstance(chat_query, tuple):
+        chat_query = chat_query[0]
 
+    # Main logic for processing the query
     if isinstance(chat_query, tuple):
         query = [{'image': chat_query[0]}]
     else:
         query = [{'text': _parse_text(chat_query)}]
     print("Query for model:", query)
+
     inputs = tokenizer.from_list_format(query)
     tokenized_inputs = tokenizer(inputs, return_tensors='pt')
     tokenized_inputs = tokenized_inputs.to(model.device)
+
     pred = model.generate(**tokenized_inputs)
     response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False)
     print("Model response:", response)
+
+    # Handling the response based on the type of query (image or text)
     if 'image' in query[0]:
         image = tokenizer.draw_bbox_on_latest_picture(response)
         if image is not None:
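For context, a minimal end-to-end sketch of the from_list_format flow this hunk rebuilds. The loading calls are assumptions based on the DEFAULT_CKPT_PATH defined above (the standard trust_remote_code pattern for this checkpoint), and model.chat is the checkpoint's documented high-level interface; the diff itself tokenizes the formatted string and calls model.generate directly:

# Sketch only, not part of the commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('qwen/Qwen-VL-Chat', trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    'qwen/Qwen-VL-Chat', device_map='auto', trust_remote_code=True).eval()

# from_list_format flattens a list of {'image': ...} / {'text': ...} dicts
# into the single tagged prompt string the tokenizer expects.
query = tokenizer.from_list_format([
    {'image': 'demo.jpeg'},            # hypothetical local image path
    {'text': 'Describe this image.'},
])

# Documented high-level interface: handles chat templating and decoding.
response, history = model.chat(tokenizer, query=query, history=None)
print(response)

Two things worth flagging in the hunk itself: decoding with skip_special_tokens=False leaves special tokens and any <box>...</box> markup in response (presumably why BOX_TAG_PATTERN is defined at the top of the file), and the official Qwen-VL demo also passes the chat history to tokenizer.draw_bbox_on_latest_picture, so the single-argument call here depends on how that helper is defined for this checkpoint.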
@@ -158,8 +163,10 @@ def predict(_chatbot, task_history) -> list:
         _chatbot[-1] = (chat_query, "No image to display.")
     else:
         _chatbot[-1] = (chat_query, response)
+
+    # Return the updated chatbot and task history
     return _chatbot, task_history
-
+
 def save_uploaded_image(image_file, upload_dir):
     if image is None:
         return None
|