Abhiroopvanaone committed on
Commit
f392e49
Β·
verified Β·
1 Parent(s): 22decf7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +199 -199
app.py CHANGED
@@ -1,200 +1,200 @@
1
- import spaces
2
- import gradio as gr
3
- import torch
4
- from transformers import pipeline
5
- from PIL import Image
6
- import time
7
- import traceback
8
-
9
- # Global model storage for Zero GPU compatibility
10
- models = {}
11
-
12
- @spaces.GPU(duration=300)
13
- def load_model_on_gpu(model_choice):
14
- """Load GLM model on GPU - separated for clarity."""
15
- model_map = {
16
- "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
17
- "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
18
- "GLM-4.5V": "zai-org/GLM-4.5V"
19
- }
20
-
21
- model_name = model_map.get(model_choice)
22
- if not model_name:
23
- return False, f"Unknown model: {model_choice}"
24
-
25
- if model_name in models:
26
- return True, f"βœ… {model_choice} already loaded"
27
-
28
- try:
29
- pipe = pipeline(
30
- "image-text-to-text",
31
- model=model_name,
32
- device_map="auto",
33
- torch_dtype=torch.float16,
34
- trust_remote_code=True
35
- )
36
- models[model_name] = pipe
37
- return True, f"βœ… {model_choice} loaded successfully"
38
- except Exception as e:
39
- return False, f"❌ Failed to load {model_choice}: {str(e)[:200]}"
40
-
41
- @spaces.GPU(duration=120)
42
- def generate_code(image, model_choice, prompt_style):
43
- """Generate CADQuery code - main GPU function."""
44
- if image is None:
45
- return "❌ Please upload an image first."
46
-
47
- # Create prompts
48
- prompts = {
49
- "Simple": "Generate CADQuery Python code for this 3D model:",
50
- "Detailed": "Analyze this 3D CAD model and generate Python CADQuery code.\n\nRequirements:\n- Import cadquery as cq\n- Store result in 'result' variable\n- Use proper CADQuery syntax\n\nCode:",
51
- "Chain-of-Thought": "Analyze this 3D CAD model step by step:\n\nStep 1: Identify the basic geometry\nStep 2: Note any features\nStep 3: Generate clean CADQuery Python code\n\n```python\nimport cadquery as cq\n\n# Generated code:"
52
- }
53
-
54
- try:
55
- # Load model if needed
56
- model_map = {
57
- "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
58
- "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
59
- "GLM-4.5V": "zai-org/GLM-4.5V"
60
- }
61
-
62
- model_name = model_map[model_choice]
63
-
64
- if model_name not in models:
65
- pipe = pipeline(
66
- "image-text-to-text",
67
- model=model_name,
68
- device_map="auto",
69
- torch_dtype=torch.float16,
70
- trust_remote_code=True
71
- )
72
- models[model_name] = pipe
73
- else:
74
- pipe = models[model_name]
75
-
76
- # Generate
77
- messages = [{
78
- "role": "user",
79
- "content": [
80
- {"type": "image", "image": image},
81
- {"type": "text", "text": prompts[prompt_style]}
82
- ]
83
- }]
84
-
85
- result = pipe(messages, max_new_tokens=512, temperature=0.7)
86
-
87
- if isinstance(result, list) and len(result) > 0:
88
- generated_text = result[0].get("generated_text", str(result))
89
- else:
90
- generated_text = str(result)
91
-
92
- # Simple code extraction
93
- code = generated_text.strip()
94
- if "```python" in code:
95
- start = code.find("```python") + 9
96
- end = code.find("```", start)
97
- if end > start:
98
- code = code[start:end].strip()
99
-
100
- if "import cadquery" not in code:
101
- code = "import cadquery as cq\n\n" + code
102
-
103
- return f"""## 🎯 Generated CADQuery Code
104
-
105
- ```python
106
- {code}
107
- ```
108
-
109
- ## πŸ“Š Info
110
- - **Model**: {model_choice}
111
- - **Prompt**: {prompt_style}
112
- - **Device**: {"GPU" if torch.cuda.is_available() else "CPU"}
113
-
114
- ## πŸ”§ Usage
115
- ```bash
116
- pip install cadquery
117
- python your_script.py
118
- ```
119
- """
120
-
121
- except Exception as e:
122
- return f"❌ **Generation Failed**: {str(e)[:500]}"
123
-
124
- def test_model(model_choice):
125
- """Test model loading."""
126
- success, message = load_model_on_gpu(model_choice)
127
- return f"## Test Result\n\n{message}"
128
-
129
- def system_info():
130
- """Get system info."""
131
- info = f"""## πŸ–₯️ System Information
132
-
133
- - **CUDA Available**: {torch.cuda.is_available()}
134
- - **CUDA Devices**: {torch.cuda.device_count() if torch.cuda.is_available() else 0}
135
- - **PyTorch Version**: {torch.__version__}
136
- - **Device**: {"GPU" if torch.cuda.is_available() else "CPU"}
137
- """
138
- return info
139
-
140
- # Create interface
141
- with gr.Blocks(title="GLM-4.5V CAD Generator", theme=gr.themes.Soft()) as demo:
142
- gr.Markdown("""
143
- # πŸ”§ GLM-4.5V CAD Generator
144
-
145
- Generate CADQuery Python code from 3D CAD model images using GLM-4.5V models!
146
-
147
- **Models**: GLM-4.5V-AWQ (fastest) | GLM-4.5V-FP8 (balanced) | GLM-4.5V (best quality)
148
- """)
149
-
150
- with gr.Tab("πŸš€ Generate"):
151
- with gr.Row():
152
- with gr.Column():
153
- image_input = gr.Image(type="pil", label="Upload CAD Model Image")
154
- model_choice = gr.Dropdown(
155
- choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
156
- value="GLM-4.5V-AWQ",
157
- label="Select Model"
158
- )
159
- prompt_style = gr.Dropdown(
160
- choices=["Simple", "Detailed", "Chain-of-Thought"],
161
- value="Chain-of-Thought",
162
- label="Prompt Style"
163
- )
164
- generate_btn = gr.Button("πŸš€ Generate CADQuery Code", variant="primary")
165
-
166
- with gr.Column():
167
- output = gr.Markdown("Upload an image and click Generate!")
168
-
169
- generate_btn.click(
170
- fn=generate_code,
171
- inputs=[image_input, model_choice, prompt_style],
172
- outputs=output
173
- )
174
-
175
- with gr.Tab("πŸ§ͺ Test"):
176
- with gr.Row():
177
- with gr.Column():
178
- test_model_choice = gr.Dropdown(
179
- choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
180
- value="GLM-4.5V-AWQ",
181
- label="Model to Test"
182
- )
183
- test_btn = gr.Button("πŸ§ͺ Test Model")
184
-
185
- with gr.Column():
186
- test_output = gr.Markdown("Click Test Model to check loading.")
187
-
188
- test_btn.click(fn=test_model, inputs=test_model_choice, outputs=test_output)
189
-
190
- with gr.Tab("βš™οΈ System"):
191
- info_display = gr.Markdown()
192
- refresh_btn = gr.Button("πŸ”„ Refresh")
193
-
194
- demo.load(fn=system_info, outputs=info_display)
195
- refresh_btn.click(fn=system_info, outputs=info_display)
196
-
197
- if __name__ == "__main__":
198
- print("πŸš€ Starting GLM-4.5V CAD Generator...")
199
- print(f"CUDA available: {torch.cuda.is_available()}")
200
  demo.launch(share=True, show_error=True)
 
1
+ import spaces
2
+ import gradio as gr
3
+ import torch
4
+ from transformers import pipeline
5
+ from PIL import Image
6
+ import time
7
+ import traceback
8
+
9
# Global model storage for Zero GPU compatibility.
# Maps HF repo id (e.g. "zai-org/GLM-4.5V") -> loaded transformers pipeline,
# so repeated @spaces.GPU calls reuse an already-loaded model.
models = {}
11
+
12
@spaces.GPU(duration=300)
def load_model_on_gpu(model_choice):
    """Load the requested GLM model onto the GPU and cache it in `models`.

    Args:
        model_choice: Display name of the model ("GLM-4.5V-AWQ",
            "GLM-4.5V-FP8" or "GLM-4.5V").

    Returns:
        Tuple (success, message) where success is a bool and message is a
        user-facing status string.
    """
    repo_ids = {
        "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
        "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
        "GLM-4.5V": "zai-org/GLM-4.5V"
    }

    repo = repo_ids.get(model_choice)
    if repo is None:
        return False, f"Unknown model: {model_choice}"

    # Reuse a previously loaded pipeline instead of loading twice.
    if repo in models:
        return True, f"βœ… {model_choice} already loaded"

    try:
        models[repo] = pipeline(
            "image-text-to-text",
            model=repo,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True
        )
    except Exception as e:
        # Truncate the error so the UI message stays readable.
        return False, f"❌ Failed to load {model_choice}: {str(e)[:200]}"

    return True, f"βœ… {model_choice} loaded successfully"
40
+
41
def _extract_code(generated_text):
    """Pull the Python snippet out of the raw model output.

    Prefers the first ```python fenced block; falls back to the whole text.
    Prepends a cadquery import if the snippet lacks one, so it runs as-is.
    """
    code = generated_text.strip()
    if "```python" in code:
        start = code.find("```python") + 9
        end = code.find("```", start)
        if end > start:
            code = code[start:end].strip()
    if "import cadquery" not in code:
        code = "import cadquery as cq\n\n" + code
    return code


@spaces.GPU(duration=120)
def generate_code(image, model_choice, prompt_style):
    """Generate CADQuery code - main GPU function.

    Args:
        image: PIL image of the CAD model; None yields an error message.
        model_choice: Display name of the GLM-4.5V variant to use.
        prompt_style: "Simple", "Detailed" or "Chain-of-Thought".

    Returns:
        Markdown string with the generated code and run info, or a
        user-facing error message starting with "❌".
    """
    if image is None:
        return "❌ Please upload an image first."

    # Create prompts
    prompts = {
        "Simple": "Generate CADQuery Python code for this 3D model:",
        "Detailed": "Analyze this 3D CAD model and generate Python CADQuery code.\n\nRequirements:\n- Import cadquery as cq\n- Store result in 'result' variable\n- Use proper CADQuery syntax\n\nCode:",
        "Chain-of-Thought": "Analyze this 3D CAD model step by step:\n\nStep 1: Identify the basic geometry\nStep 2: Note any features\nStep 3: Generate clean CADQuery Python code\n\n```python\nimport cadquery as cq\n\n# Generated code:"
    }

    model_map = {
        "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
        "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
        "GLM-4.5V": "zai-org/GLM-4.5V"
    }

    # Validate dropdown values up front. Previously model_map[model_choice]
    # raised KeyError inside the try block, surfacing as a cryptic
    # "Generation Failed: 'X'" instead of a clear message (and inconsistent
    # with load_model_on_gpu, which guards with .get()).
    model_name = model_map.get(model_choice)
    if model_name is None:
        return f"❌ Unknown model: {model_choice}"
    prompt = prompts.get(prompt_style)
    if prompt is None:
        return f"❌ Unknown prompt style: {prompt_style}"

    try:
        # Load model if needed, caching it for subsequent calls.
        pipe = models.get(model_name)
        if pipe is None:
            pipe = pipeline(
                "image-text-to-text",
                model=model_name,
                device_map="auto",
                torch_dtype=torch.float16,
                trust_remote_code=True
            )
            models[model_name] = pipe

        # Generate
        messages = [{
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt}
            ]
        }]

        result = pipe(messages, max_new_tokens=512, temperature=0.7)

        # Pipeline output shape varies by transformers version; be defensive.
        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get("generated_text", str(result))
        else:
            generated_text = str(result)

        code = _extract_code(generated_text)

        return f"""## 🎯 Generated CADQuery Code

```python
{code}
```

## πŸ“Š Info
- **Model**: {model_choice}
- **Prompt**: {prompt_style}
- **Device**: {"GPU" if torch.cuda.is_available() else "CPU"}

## πŸ”§ Usage
```bash
pip install cadquery
python your_script.py
```
"""

    except Exception as e:
        return f"❌ **Generation Failed**: {str(e)[:500]}"
123
+
124
def test_model(model_choice):
    """Test model loading and report the outcome as a Markdown snippet.

    Args:
        model_choice: Display name of the model to load.

    Returns:
        Markdown string wrapping the status message from load_model_on_gpu.
    """
    # The success flag was previously bound but unused; only the message
    # is surfaced to the UI.
    _, message = load_model_on_gpu(model_choice)
    return f"## Test Result\n\n{message}"
128
+
129
def system_info():
    """Return a Markdown summary of the runtime hardware and software.

    Reports CUDA availability, visible CUDA device count, the PyTorch
    version, and whether execution will use GPU or CPU.
    """
    cuda_ok = torch.cuda.is_available()
    device_count = torch.cuda.device_count() if cuda_ok else 0
    return f"""## πŸ–₯️ System Information

- **CUDA Available**: {cuda_ok}
- **CUDA Devices**: {device_count}
- **PyTorch Version**: {torch.__version__}
- **Device**: {"GPU" if cuda_ok else "CPU"}
"""
139
+
140
# Create interface: three tabs (Generate / Test / System) wired to the
# functions defined above. `demo` is launched from the __main__ guard.
with gr.Blocks(title="GLM-4.5V CAD Generator", theme=gr.themes.Soft()) as demo:
    # Header / usage blurb shown above all tabs.
    gr.Markdown("""
# πŸ”§ GLM-4.5V CAD Generator

Generate CADQuery Python code from 3D CAD model images using GLM-4.5V models!

**Models**: GLM-4.5V-AWQ (fastest) | GLM-4.5V-FP8 (balanced) | GLM-4.5V (best quality)
""")

    # Main tab: image + model/prompt dropdowns -> generated code (Markdown).
    with gr.Tab("πŸš€ Generate"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type="pil", label="Upload CAD Model Image")
                model_choice = gr.Dropdown(
                    choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
                    value="GLM-4.5V-AWQ",
                    label="Select Model"
                )
                prompt_style = gr.Dropdown(
                    choices=["Simple", "Detailed", "Chain-of-Thought"],
                    value="Chain-of-Thought",
                    label="Prompt Style"
                )
                generate_btn = gr.Button("πŸš€ Generate CADQuery Code", variant="primary")

            with gr.Column():
                output = gr.Markdown("Upload an image and click Generate!")

        generate_btn.click(
            fn=generate_code,
            inputs=[image_input, model_choice, prompt_style],
            outputs=output
        )

    # Test tab: verify a model loads on GPU without running generation.
    with gr.Tab("πŸ§ͺ Test"):
        with gr.Row():
            with gr.Column():
                test_model_choice = gr.Dropdown(
                    choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
                    value="GLM-4.5V-AWQ",
                    label="Model to Test"
                )
                test_btn = gr.Button("πŸ§ͺ Test Model")

            with gr.Column():
                test_output = gr.Markdown("Click Test Model to check loading.")

        test_btn.click(fn=test_model, inputs=test_model_choice, outputs=test_output)

    # System tab: hardware/software info, populated on page load and on demand.
    with gr.Tab("βš™οΈ System"):
        info_display = gr.Markdown()
        refresh_btn = gr.Button("πŸ”„ Refresh")

        demo.load(fn=system_info, outputs=info_display)
        refresh_btn.click(fn=system_info, outputs=info_display)
196
+
197
if __name__ == "__main__":
    # Startup banner: confirm CUDA visibility before the UI comes up.
    print("πŸš€ Starting GLM-4.5V CAD Generator...")
    print(f"CUDA available: {torch.cuda.is_available()}")
    # share=True creates a public Gradio link; show_error surfaces tracebacks
    # in the browser instead of a generic failure message.
    demo.launch(share=True, show_error=True)