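# FastAPI inference server exposing an OpenAI-compatible /v1/chat/completions
# endpoint for the osunlp/UGround-V1-72B visual-grounding model (loaded via
# the Qwen2-VL architecture in transformers). Images may arrive either as
# base64 data URLs or as plain URL strings inside OpenAI-style content parts.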
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
import torch, os, base64, io, logging, time
from typing import Any, Dict, List, Tuple
from PIL import Image

MODEL_ID = "osunlp/UGround-V1-72B"
CACHE_DIR = (
    os.environ.get("HF_HUB_CACHE")
    or os.environ.get("HF_HOME")
    or "/data/huggingface"
)

# PyTorch performance settings
# 1) Ensure CUDA kernel cache directory is writable/persistent to avoid recompilation stalls
KERNEL_CACHE_DIR = os.environ.get("PYTORCH_KERNEL_CACHE_PATH", "/tmp/torch_kernels")
os.environ["PYTORCH_KERNEL_CACHE_PATH"] = KERNEL_CACHE_DIR
try:
    os.makedirs(KERNEL_CACHE_DIR, exist_ok=True)
except Exception:
    pass

# 2) Enable TF32 for faster matmul on Ampere+ GPUs (minimal quality impact)
try:
    torch.backends.cuda.matmul.allow_tf32 = True  # type: ignore[attr-defined]
    torch.backends.cudnn.allow_tf32 = True  # type: ignore[attr-defined]
    torch.set_float32_matmul_precision("high")  # type: ignore[attr-defined]
except Exception:
    pass

processor = AutoProcessor.from_pretrained(
    MODEL_ID, trust_remote_code=True, cache_dir=CACHE_DIR, use_fast=False
)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
    cache_dir=CACHE_DIR,
)
model.eval()
try:
    torch.set_grad_enabled(False)
except Exception:
    pass

app = FastAPI()

# Configure basic logging for debugging
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s"
)
logger = logging.getLogger(__name__)

@app.get("/")
async def root():
    return {"status": "ok"}

class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[Dict[str, Any]]
    max_tokens: int = 256

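# Upper bounds for incoming images. Larger images are downscaled (aspect ratio
# preserved) before tokenization, which caps the number of vision tokens per
# request; the specific values are a deployment-specific tuning choice.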
MAX_IMAGE_WIDTH = 512
MAX_IMAGE_HEIGHT = 388

def _decode_base64_image(data_url: str) -> Image.Image:
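    """Decode a raw base64 string or a data URL into an RGB PIL image,
    downscaling to fit MAX_IMAGE_WIDTH x MAX_IMAGE_HEIGHT when needed."""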
    try:
        is_data_url = data_url.startswith("data:")
        if is_data_url:
            header, b64data = data_url.split(",", 1)
            logger.debug("Decoding image from data URL; header prefix=%r", header[:50])
        else:
            b64data = data_url
            logger.debug("Decoding image from raw base64 string; length=%d", len(b64data))
        img_bytes = base64.b64decode(b64data)
        img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
        orig_w, orig_h = img.width, img.height
        # Downscale if larger than bounds, preserving aspect ratio
        if orig_w > MAX_IMAGE_WIDTH or orig_h > MAX_IMAGE_HEIGHT:
            target = (MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT)
            img = img.copy()
            img.thumbnail(target, Image.LANCZOS)
            logger.debug(
                "Resized image from %sx%s to %sx%s (bounds %sx%s)",
                orig_w,
                orig_h,
                img.width,
                img.height,
                MAX_IMAGE_WIDTH,
                MAX_IMAGE_HEIGHT,
            )
        try:
            logger.debug("Decoded image: size=%sx%s mode=%s", img.width, img.height, img.mode)
        except Exception:
            logger.debug("Decoded image but could not log image metadata")
        return img
    except Exception:
        logger.exception("Failed to decode base64 image")
        raise

def _to_qwen_messages_and_images(messages: List[Dict[str, Any]]) -> Tuple[List[Dict[str, Any]], List[Any]]:
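    """Convert OpenAI-style chat messages into Qwen2-VL message format,
    collecting image inputs (PIL images or URL strings) in order of
    appearance."""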
    qwen_msgs: List[Dict[str, Any]] = []
    images: List[Any] = []
    logger.debug("Begin parsing messages: count=%d", len(messages) if messages else 0)
    for idx, msg in enumerate(messages):
        role = msg.get("role", "user")
        content = msg.get("content")
        logger.debug("Processing message #%d role=%s content_type=%s", idx, role, type(content).__name__)
        q_content: List[Dict[str, Any]] = []

        if isinstance(content, str):
            logger.debug("Message #%d text length=%d", idx, len(content))
            q_content.append({"type": "text", "text": content})
        elif isinstance(content, list):
            logger.debug("Message #%d has %d content parts", idx, len(content))
            for pidx, part in enumerate(content):
                ptype = part.get("type")
                logger.debug("Part #%d type=%s", pidx, ptype)
                if ptype == "text":
                    text_val = part.get("text") or part.get("content") or ""
                    logger.debug("Part #%d text length=%d", pidx, len(text_val))
                    q_content.append({"type": "text", "text": text_val})
                elif ptype in ("image", "image_url"):
                    # Qwen style: {"type": "image", "image": ...}
                    # OpenAI style: {"type": "image_url", "image_url": {"url": "..."}}
                    url = part.get("image")
                    if url is None and isinstance(part.get("image_url"), dict):
                        url = part["image_url"].get("url")
                    if isinstance(url, str) and url.startswith("data:image"):
                        logger.debug("Part #%d image provided as base64 data URL", pidx)
                        img = _decode_base64_image(url)
                        images.append(img)
                        q_content.append({"type": "image", "image": img})
                    else:
                        # URL or non-base64 string
                        logger.debug("Part #%d image provided as URL or non-base64 string: %s", pidx, str(url)[:200])
                        images.append(url)
                        q_content.append({"type": "image", "image": url})
        else:
            # Unknown content; coerce to text
            logger.debug("Message #%d unknown content type; coercing to text", idx)
            q_content.append({"type": "text", "text": str(content)})

        qwen_msgs.append({"role": role, "content": q_content})
    logger.debug("Finished parsing messages: qwen_msgs=%d images=%d", len(qwen_msgs), len(images))
    return qwen_msgs, images

def _make_tiny_base64_png(size: Tuple[int, int] = (64, 48), color: Tuple[int, int, int] = (128, 128, 128)) -> str:
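    """Create a small solid-color PNG and return it as a base64 data URL
    (used only for the vision warmup pass)."""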
    buf = io.BytesIO()
    Image.new("RGB", size, color).save(buf, format="PNG")
    data = base64.b64encode(buf.getvalue()).decode("ascii")
    return f"data:image/png;base64,{data}"

@app.on_event("startup")
async def _startup_warmup():
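    """Run one tiny text-only and one tiny vision generation at startup so
    CUDA kernels are compiled before the first real request arrives
    (set DISABLE_WARMUP=1 to skip)."""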
    if os.environ.get("DISABLE_WARMUP", "0") == "1":
        logger.info("Warmup disabled via DISABLE_WARMUP=1")
        return
    try:
        logger.info("Warmup start: compiling kernels (text + tiny image)")
        # Text-only warmup
        text_msgs: List[Dict[str, Any]] = [
            {"role": "user", "content": "Hello"}
        ]
        qmsgs_t, _ = _to_qwen_messages_and_images(text_msgs)
        prompt_t = processor.apply_chat_template(qmsgs_t, tokenize=False, add_generation_prompt=True)
        inputs_t = processor(text=[prompt_t], images=None, padding=True, return_tensors="pt")
        inputs_t = inputs_t.to(model.device)
        _t0 = time.perf_counter()
        with torch.no_grad():
            _ = model.generate(
                **inputs_t,
                max_new_tokens=int(os.environ.get("WARMUP_MAX_NEW_TOKENS", "4")),
                max_time=float(os.environ.get("WARMUP_MAX_TIME_SECONDS", "3")),
            )
        logger.info("Text warmup done in %.1f ms", (time.perf_counter() - _t0) * 1000.0)

        # Tiny image + text warmup
        tiny_url = _make_tiny_base64_png()
        viz_msgs: List[Dict[str, Any]] = [
            {"role": "user", "content": [
                {"type": "text", "text": "Describe the image"},
                {"type": "image_url", "image_url": {"url": tiny_url}},
            ]}
        ]
        qmsgs_v, images_v = _to_qwen_messages_and_images(viz_msgs)
        prompt_v = processor.apply_chat_template(qmsgs_v, tokenize=False, add_generation_prompt=True)
        inputs_v = processor(text=[prompt_v], images=images_v, padding=True, return_tensors="pt")
        inputs_v = inputs_v.to(model.device)
        _t1 = time.perf_counter()
        with torch.no_grad():
            _ = model.generate(
                **inputs_v,
                max_new_tokens=int(os.environ.get("WARMUP_MAX_NEW_TOKENS", "4")),
                max_time=float(os.environ.get("WARMUP_MAX_TIME_SECONDS", "3")),
            )
        logger.info("Vision warmup done in %.1f ms", (time.perf_counter() - _t1) * 1000.0)

        logger.info("Warmup complete")
    except Exception:
        logger.exception("Warmup failed")

@app.post("/v1/chat/completions")
async def chat_completions(req: ChatCompletionRequest):
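    """OpenAI-compatible chat completions endpoint: single choice, no
    streaming. Note that model.generate runs synchronously, so the event
    loop is blocked for the duration of each generation."""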
    logger.debug(
        "Request received: model=%s, max_tokens=%s, message_count=%d",
        req.model,
        req.max_tokens,
        len(req.messages) if req.messages is not None else 0,
    )
    if req.messages:
        logger.debug("First message preview: %s", str(req.messages[0])[:300])
    qwen_messages, image_inputs = _to_qwen_messages_and_images(req.messages)
    logger.debug(
        "Converted messages: qwen_count=%d, images_count=%d",
        len(qwen_messages),
        len(image_inputs) if image_inputs is not None else 0,
    )
    if qwen_messages:
        logger.debug("First qwen message preview: %s", str(qwen_messages[0])[:300])
    prompt_text = processor.apply_chat_template(
        qwen_messages, tokenize=False, add_generation_prompt=True
    )
    logger.debug("Prompt length (chars)=%d; preview=%r", len(prompt_text), prompt_text[:200])
    inputs = processor(
        text=[prompt_text],
        images=image_inputs if image_inputs else None,
        padding=True,
        return_tensors="pt",
    )
    try:
        tensor_info_pre = {
            k: (tuple(v.shape), str(getattr(v, "dtype", "<na>")))
            for k, v in inputs.items()
            if hasattr(v, "shape")
        }
        logger.debug("Processor outputs (pre .to): %s", tensor_info_pre)
    except Exception:
        logger.debug("Could not summarize processor outputs before device move")
    inputs = inputs.to(model.device)
    try:
        tensor_info_post = {
            k: (
                tuple(v.shape),
                str(getattr(v, "dtype", "<na>")),
                str(getattr(v, "device", "<na>")),
            )
            for k, v in inputs.items()
            if torch.is_tensor(v)
        }
        logger.debug("Inputs moved to device=%s; tensor_info=%s", getattr(model, "device", "<unknown>"), tensor_info_post)
    except Exception:
        logger.debug("Could not summarize inputs after device move")

    logger.debug("Starting generation: max_new_tokens=%d", req.max_tokens)
    _t0 = time.perf_counter()
    generated_ids = model.generate(**inputs, max_new_tokens=req.max_tokens)
    _elapsed_ms = (time.perf_counter() - _t0) * 1000.0
    try:
        logger.debug(
            "Generation done in %.1f ms; generated_ids shape=%s dtype=%s device=%s",
            _elapsed_ms,
            tuple(generated_ids.shape) if hasattr(generated_ids, "shape") else "<na>",
            str(getattr(generated_ids, "dtype", "<na>")),
            str(getattr(generated_ids, "device", "<na>")),
        )
    except Exception:
        logger.debug("Could not summarize generated_ids")
    trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    try:
        lengths_in = [row.size(0) for row in inputs.input_ids]
        lengths_out = [row.size(0) for row in generated_ids]
        logger.debug("Token lengths: input=%s, output=%s", lengths_in, lengths_out)
    except Exception:
        logger.debug("Could not compute token length summaries")
    output_texts = processor.batch_decode(
        trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    text = output_texts[0] if output_texts else ""
    logger.debug(
        "Decoded %d sequences; first_text_len=%d",
        len(output_texts),
        len(text) if text else 0,
    )
    if text:
        logger.debug("Output preview: %r", text[:500])

    return {
        "id": "chatcmpl-uground72b",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": req.model,
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": text},
            "finish_reason": "stop"
        }]
    }
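
# ---------------------------------------------------------------------------
# Usage sketch (hypothetical; not part of the service). Assumes this file is
# saved as app.py and served with uvicorn, e.g.
#   uvicorn app:app --host 0.0.0.0 --port 8000
# and that `requests` is installed on the client side. Host, port, and the
# screenshot path are illustrative placeholders.
#
# import base64, requests
#
# with open("screenshot.png", "rb") as f:
#     b64 = base64.b64encode(f.read()).decode("ascii")
#
# resp = requests.post(
#     "http://localhost:8000/v1/chat/completions",
#     json={
#         "model": "osunlp/UGround-V1-72B",
#         "max_tokens": 128,
#         "messages": [{
#             "role": "user",
#             "content": [
#                 {"type": "text", "text": "Where is the search button?"},
#                 {"type": "image_url",
#                  "image_url": {"url": f"data:image/png;base64,{b64}"}},
#             ],
#         }],
#     },
# )
# print(resp.json()["choices"][0]["message"]["content"])
# ---------------------------------------------------------------------------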