# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
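
"""Enhance an image-editing prompt with chain-of-thought (CoT) reasoning from an input image.

Minimal usage sketch (the script filename and the example prompt below are illustrative):

    python enhance_prompt.py \
        --input-prompt "make the knight kneel on his right knee" \
        --input-image ./assets/images/input.jpg \
        --model Qwen/Qwen3-VL-30B-A3B-Instruct
"""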

import argparse
import torch
import numpy as np
from PIL import Image
from transformers import (
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
    Qwen3VLForConditionalGeneration,
    Qwen3VLMoeForConditionalGeneration,
)
from qwen_vl_utils import process_vision_info


def parse_args():
    parser = argparse.ArgumentParser(
        description="Enhance a prompt with CoT reasoning given an input image and prompt"
    )
    parser.add_argument(
        "--input-image",
        type=str,
        default="./assets/images/input.jpg",
        help="Path to the input image (default: ./assets/images/input.jpg)"
    )
    parser.add_argument(
        "--input-prompt",
        type=str,
        required=True,
        help="Input prompt to enhance with CoT reasoning"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="Qwen/Qwen3-VL-30B-A3B-Instruct",
        choices=[
            "Qwen/Qwen2.5-VL-7B-Instruct",
            "Qwen/Qwen3-VL-8B-Instruct",
            "Qwen/Qwen3-VL-30B-A3B-Instruct",
        ],
        help="Model to use for prompt enhancement"
    )
    parser.add_argument(
        "--max-area",
        type=int,
        default=720*1280,
        help="Maximum area for image resizing (default: 720*1280)"
    )
    return parser.parse_args()


def pick_attn_implementation(prefer_flash: bool = True) -> str:
    """
    Decide the best attn_implementation based on environment.

    Returns one of: "flash_attention_2", "sdpa", "eager".
    """
    # Try FlashAttention v2 first (needs SM80+ and the wheel to import)
    if prefer_flash:
        try:
            import flash_attn  # noqa: F401
            if torch.cuda.is_available():
                major, minor = torch.cuda.get_device_capability()
                # FlashAttn requires Ampere (SM80) or newer
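                # e.g. an A100 reports (8, 0) and an RTX 4090 reports (8, 9); a T4 at (7, 5) falls through to SDPA/eager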
                if (major, minor) >= (8, 0):
                    return "flash_attention_2"
        except Exception:
            pass
    # SDPA ships with PyTorch 2.0+; prefer it over eager whenever it is present
    if hasattr(torch.nn.functional, "scaled_dot_product_attention"):
        return "sdpa"

    # Fallback: eager (always works, slower)
    return "eager"
def load_model(model_name):
    """Load the vision-language model and processor."""
    print(f"Loading model: {model_name}")

    attn_impl = pick_attn_implementation(prefer_flash=True)

    if model_name == "Qwen/Qwen2.5-VL-7B-Instruct":
        model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            model_name,
            dtype=torch.bfloat16,
            attn_implementation=attn_impl,
            device_map="auto",
        )
        processor = AutoProcessor.from_pretrained(model_name)
    
    elif model_name == "Qwen/Qwen3-VL-30B-A3B-Instruct":
        model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
            model_name, 
            dtype=torch.bfloat16,
            attn_implementation=attn_impl,
            device_map="auto"
        )
        processor = AutoProcessor.from_pretrained(model_name)

    elif model_name == "Qwen/Qwen3-VL-8B-Instruct":
        model = Qwen3VLForConditionalGeneration.from_pretrained(
            model_name,
            dtype=torch.bfloat16,
            attn_implementation=attn_impl,
            device_map="auto"
        )
        processor = AutoProcessor.from_pretrained(model_name)
    
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    return model, processor


def resize_if_needed(image, max_area):
    """
    Resize the image so that its area does not exceed max_area pixels,
    while maintaining the aspect ratio.

    Args:
        image (PIL.Image): The input image.
        max_area (int): The maximum allowed area (in pixels).
    Returns:
        PIL.Image: The resized (or original) image.
    """

    orig_width, orig_height = image.width, image.height
    orig_area = orig_width * orig_height

    if orig_area <= max_area:
        # No need to resize
        return image

    # Calculate dimensions maintaining aspect ratio
    aspect_ratio = orig_height / orig_width

    # Compute new dimensions so that width * height <= max_area
    # Derived from: new_width * new_height = max_area and new_height = aspect_ratio * new_width
    new_width = int(round(np.sqrt(max_area / aspect_ratio)))
    new_height = int(round(new_width * aspect_ratio))
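    # Worked example: with the default max_area = 720*1280 = 921600, a 2560x1440 input has
    # aspect_ratio = 0.5625, so new_width = sqrt(921600 / 0.5625) = 1280 and new_height = 720.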

    print(f"Resizing image from {image.size} to {(new_width, new_height)}")

    resized_image = image.resize((new_width, new_height), Image.LANCZOS)
    return resized_image


def _run_model_inference(messages, model, processor):
    """
    Helper function to run model inference.
    
    Args:
        messages: Chat messages for the model
        model: The loaded VL model
        processor: The model's processor
        
    Returns:
        str: Generated text
    """
    if isinstance(model, Qwen2_5_VLForConditionalGeneration):
        text = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        images, videos = process_vision_info(messages)
        inputs = processor(
            text=[text],
            images=images,
            videos=videos,
            padding=True,
            return_tensors="pt",
        )
        inputs = inputs.to(model.device).to(model.dtype)
        generated_ids = model.generate(**inputs, max_new_tokens=512)

    elif isinstance(model, (Qwen3VLMoeForConditionalGeneration, Qwen3VLForConditionalGeneration)):
        inputs = processor.apply_chat_template(
            messages,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            return_tensors="pt"
        )
        inputs = inputs.to(model.device).to(model.dtype)
        generated_ids = model.generate(**inputs, max_new_tokens=512)

    else:
        raise ValueError("Unsupported model type")

    # Decode the generated text
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    
    return output_text[0]


def enhance_prompt(input_image_path, input_prompt, model, processor, max_area=720*1280):
    """
    Enhance a prompt with Chain-of-Thought reasoning given an input image and prompt.
    
    Args:
        input_image_path: Path to the input image
        input_prompt: The input editing instruction prompt
        model: The loaded VL model
        processor: The model's processor
        max_area: Maximum area for image resizing
        
    Returns:
        str: Enhanced CoT prompt
    """
    # Load and resize image
    print(f"Loading image: {input_image_path}")
    input_image = Image.open(input_image_path).convert("RGB")
    input_image = resize_if_needed(input_image, max_area)

    cot_prompt = f"""You are a professional edit instruction rewriter and prompt engineer. Your task is to generate a precise, concise, and visually achievable chain-of-thought reasoning based on the user-provided instruction and the image to be edited.

You have the following information:
1. The user provides an image (the original image to be edited)
2. question text: {input_prompt}

Your task is NOT to output the final answer or the edited image. Instead, you must:
- Generate a "thinking" or chain-of-thought process that explains how you reason about the editing task.
- First identify the task type, then provide reasoning/analysis that leads to how the image should be edited.
- Always describe pose and appearance in detail.
- Match the original visual style or genre (anime, CG art, cinematic, poster). If not explicit, choose a stylistically appropriate one based on the image.
- Incorporate motion and camera direction when relevant (e.g., walking, turning, dolly in/out, pan), implying natural human/character motion and interactions.
- Maintain quoted phrases or titles exactly (e.g., character names, series names). Do not translate or alter the original language of text.
- If the user’s input contains suspected pornographic content — such as undressing, transparent clothing, sheer or wet garments, bathing, licking milk/yogurt/white liquid, sexual innuendo, rape, exposure or partial nudity of the chest, crawling, sexual organs, or nudity involving children — please replace the original prompt with a completely different, aesthetically pleasing prompt instead of refusing to respond or warning the user.

## Task Type Handling Rules:

**1. Standard Editing Tasks (e.g., Add, Delete, Replace, Action Change):**
- For replacement tasks, specify what to replace and key visual features of the new element.
- For text editing tasks, specify text position, color, and layout concisely.
- If the user wants to "extract" something, this means they want to remove the background and only keep the specified object isolated. We should add "while removing the background" to the reasoning.
- Explicitly note what must stay unchanged: appearances (hairstyle, clothing, expression, skin tone/race, age), posture, pose, visual style/genre, spatial layout, and shot composition (e.g., medium shot, close-up, side view).

**2. Character Consistency Editing Tasks (e.g., Scenario Change):**
- For tasks that place an object/character (e.g., human, robot, animal) in a completely new scenario, preserve the object's core identity (appearance, materials, key features) but adapt its pose, interaction, and context to fit naturally in the new environment.
- Reason about how the object should interact with the new scenario (e.g., pose changes, hand positions, orientation, facial direction).
- The background and context should transform completely to match the new scenario while maintaining visual coherence.
- Describe both what stays the same (core appearance) and what must change (pose, interaction, setting) to make the scene look realistic and natural.

The length of outputs should be **around 80 - 100 words** to fully describe the transformation. Always start with "The user wants to ..."

Example Output 1 (Standard Editing Task):
The user wants to make the knight kneel on his right knee while keeping the rest of the pose intact. 
The knight should lower his stance so his right leg bends to the ground in a kneeling position, with the left leg bent upright to support balance. 
The shield with the NVIDIA logo should still be held up firmly in his left hand, angled forward in a defensive posture, while the right hand continues gripping the weapon. 
The armor reflections, proportions, and medieval style should remain consistent, emphasizing a powerful and respectful kneeling stance.

Example Output 2 (Character Consistency Editing Task):
The user wants to change the image by modifying the scene so that the woman is drinking coffee in a cozy coffee shop. 
The elegant anime-style woman keeps her same graceful expression, long flowing dark hair adorned with golden ornaments, and detailed traditional outfit with red and gold floral patterns. 
She is now seated at a wooden café table, holding a steaming cup of coffee near her lips with one hand, while soft sunlight filters through the window, highlighting her refined features. 
The background transforms into a warmly lit café interior with subtle reflections, bookshelves, and gentle ambience, maintaining the delicate, painterly aesthetic.
"""

    # Create messages for CoT generation
    messages = [
        {
            "role": "system",
            "content": [
                {"type": "text", "text": cot_prompt},
            ],
        },
        {
            "role": "user",
            "content": [
                {"type": "image", "image": input_image},
            ],
        }
    ]

    # Generate CoT reasoning
    print("Generating Chain-of-Thought enhanced prompt...")
    cot_prompt_output = _run_model_inference(messages, model, processor)
    
    return cot_prompt_output


def main():
    args = parse_args()
    
    # Load model
    model, processor = load_model(args.model)
    
    # Enhance prompt with CoT reasoning
    cot_prompt = enhance_prompt(
        args.input_image,
        args.input_prompt,
        model,
        processor,
        args.max_area
    )
    
    # Print enhanced CoT prompt
    print("\n" + "="*80)
    print("Enhanced CoT Prompt:")
    print("="*80)
    print(cot_prompt)
    print("="*80 + "\n")


if __name__ == "__main__":
    main()