Abhiroopvanaone committed on
Commit
e74e313
·
verified ·
1 Parent(s): f392e49

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +149 -197
app.py CHANGED
@@ -1,200 +1,152 @@
1
  import spaces
2
- import gradio as gr
3
- import torch
4
- from transformers import pipeline
5
- from PIL import Image
6
- import time
7
- import traceback
8
-
9
- # Global model storage for Zero GPU compatibility
10
- models = {}
11
-
12
@spaces.GPU(duration=300)
def load_model_on_gpu(model_choice):
    """Load a GLM vision pipeline onto the GPU and cache it in ``models``.

    Returns a ``(success, message)`` tuple describing the outcome; the message
    is user-facing markdown text shown in the UI.
    """
    # Friendly dropdown label -> Hugging Face repo id.
    repo_ids = {
        "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
        "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
        "GLM-4.5V": "zai-org/GLM-4.5V"
    }

    repo_id = repo_ids.get(model_choice)
    if not repo_id:
        return False, f"Unknown model: {model_choice}"

    # Reuse the cached pipeline when this model was loaded earlier.
    if repo_id in models:
        return True, f"βœ… {model_choice} already loaded"

    try:
        loaded = pipeline(
            "image-text-to-text",
            model=repo_id,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True
        )
        models[repo_id] = loaded
        return True, f"βœ… {model_choice} loaded successfully"
    except Exception as e:
        # Truncate the error so the UI message stays readable.
        return False, f"❌ Failed to load {model_choice}: {str(e)[:200]}"
40
-
41
@spaces.GPU(duration=120)
def generate_code(image, model_choice, prompt_style):
    """Generate CADQuery code - main GPU function."""
    if image is None:
        return "❌ Please upload an image first."

    # Instruction text for each prompt style offered in the UI.
    prompt_templates = {
        "Simple": "Generate CADQuery Python code for this 3D model:",
        "Detailed": "Analyze this 3D CAD model and generate Python CADQuery code.\n\nRequirements:\n- Import cadquery as cq\n- Store result in 'result' variable\n- Use proper CADQuery syntax\n\nCode:",
        "Chain-of-Thought": "Analyze this 3D CAD model step by step:\n\nStep 1: Identify the basic geometry\nStep 2: Note any features\nStep 3: Generate clean CADQuery Python code\n\n```python\nimport cadquery as cq\n\n# Generated code:"
    }

    try:
        # Friendly dropdown label -> Hugging Face repo id.
        repo_ids = {
            "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
            "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
            "GLM-4.5V": "zai-org/GLM-4.5V"
        }
        repo_id = repo_ids[model_choice]

        # Load lazily and cache; reloading on every request would be very slow.
        pipe = models.get(repo_id)
        if pipe is None:
            pipe = pipeline(
                "image-text-to-text",
                model=repo_id,
                device_map="auto",
                torch_dtype=torch.float16,
                trust_remote_code=True
            )
            models[repo_id] = pipe

        # Single-turn chat message: the image plus the text instructions.
        conversation = [{
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt_templates[prompt_style]}
            ]
        }]

        raw = pipe(conversation, max_new_tokens=512, temperature=0.7)

        # The pipeline normally returns a list of dicts; fall back to str().
        if isinstance(raw, list) and len(raw) > 0:
            generated_text = raw[0].get("generated_text", str(raw))
        else:
            generated_text = str(raw)

        # Pull the fenced code out of the response when present.
        code = generated_text.strip()
        if "```python" in code:
            start = code.find("```python") + 9
            end = code.find("```", start)
            if end > start:
                code = code[start:end].strip()

        # Guarantee the snippet is runnable on its own.
        if "import cadquery" not in code:
            code = "import cadquery as cq\n\n" + code

        return f"""## 🎯 Generated CADQuery Code

```python
{code}
```

## πŸ“Š Info
- **Model**: {model_choice}
- **Prompt**: {prompt_style}
- **Device**: {"GPU" if torch.cuda.is_available() else "CPU"}

## πŸ”§ Usage
```bash
pip install cadquery
python your_script.py
```
"""

    except Exception as e:
        return f"❌ **Generation Failed**: {str(e)[:500]}"
123
-
124
def test_model(model_choice):
    """Attempt to load *model_choice* and report the result as markdown."""
    _, message = load_model_on_gpu(model_choice)
    return f"## Test Result\n\n{message}"
128
-
129
def system_info():
    """Return a markdown summary of the runtime environment."""
    cuda_ok = torch.cuda.is_available()
    device_count = torch.cuda.device_count() if cuda_ok else 0
    info = f"""## πŸ–₯️ System Information

- **CUDA Available**: {cuda_ok}
- **CUDA Devices**: {device_count}
- **PyTorch Version**: {torch.__version__}
- **Device**: {"GPU" if cuda_ok else "CPU"}
"""
    return info
139
 
140
# Build the Gradio interface: Generate / Test / System tabs.
with gr.Blocks(title="GLM-4.5V CAD Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# πŸ”§ GLM-4.5V CAD Generator

Generate CADQuery Python code from 3D CAD model images using GLM-4.5V models!

**Models**: GLM-4.5V-AWQ (fastest) | GLM-4.5V-FP8 (balanced) | GLM-4.5V (best quality)
""")

    with gr.Tab("πŸš€ Generate"):
        with gr.Row():
            with gr.Column():
                cad_image = gr.Image(type="pil", label="Upload CAD Model Image")
                model_selector = gr.Dropdown(
                    choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
                    value="GLM-4.5V-AWQ",
                    label="Select Model"
                )
                style_selector = gr.Dropdown(
                    choices=["Simple", "Detailed", "Chain-of-Thought"],
                    value="Chain-of-Thought",
                    label="Prompt Style"
                )
                run_btn = gr.Button("πŸš€ Generate CADQuery Code", variant="primary")

            with gr.Column():
                result_md = gr.Markdown("Upload an image and click Generate!")

        run_btn.click(
            fn=generate_code,
            inputs=[cad_image, model_selector, style_selector],
            outputs=result_md
        )

    with gr.Tab("πŸ§ͺ Test"):
        with gr.Row():
            with gr.Column():
                test_selector = gr.Dropdown(
                    choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
                    value="GLM-4.5V-AWQ",
                    label="Model to Test"
                )
                run_test_btn = gr.Button("πŸ§ͺ Test Model")

            with gr.Column():
                test_result_md = gr.Markdown("Click Test Model to check loading.")

        run_test_btn.click(fn=test_model, inputs=test_selector, outputs=test_result_md)

    with gr.Tab("βš™οΈ System"):
        sysinfo_md = gr.Markdown()
        refresh_button = gr.Button("πŸ”„ Refresh")

        # Populate on page load and on demand.
        demo.load(fn=system_info, outputs=sysinfo_md)
        refresh_button.click(fn=system_info, outputs=sysinfo_md)

if __name__ == "__main__":
    print("πŸš€ Starting GLM-4.5V CAD Generator...")
    print(f"CUDA available: {torch.cuda.is_available()}")
    demo.launch(share=True, show_error=True)
 
1
  import spaces
2
+ import gradio as gr
3
+ import torch
4
+ from transformers import pipeline
5
+ from PIL import Image
6
+
7
+ # Global model storage
8
+ models = {}
9
+
10
@spaces.GPU(duration=120)
def generate_code(image, model_choice, prompt_style):
    """Generate CADQuery code from a CAD image - single GPU function.

    Args:
        image: PIL image of the 3D CAD model (None when nothing uploaded).
        model_choice: UI label of the model to use.
        prompt_style: "Simple", "Detailed" or "Chain-of-Thought".

    Returns:
        Markdown string with the generated code, or an error message.
    """
    if image is None:
        return "❌ Please upload an image first."

    # FIX: these prompt literals were line-wrapped in the source (invalid
    # Python string literals) - restored as well-formed single-line strings.
    prompts = {
        "Simple": "Generate CADQuery Python code for this 3D model:",
        "Detailed": "Analyze this 3D CAD model and generate Python CADQuery code.\n\nRequirements:\n- Import cadquery as cq\n- Store result in 'result' variable\n- Use proper CADQuery syntax\n\nCode:",
        "Chain-of-Thought": "Analyze this 3D CAD model step by step:\n\nStep 1: Identify the basic geometry\nStep 2: Note any features\nStep 3: Generate clean CADQuery Python code\n\n```python\nimport cadquery as cq\n\n# Generated code:"
    }

    try:
        # UI label -> Hugging Face repo id.
        model_map = {
            "GLM-4.5V-AWQ": "QuantTrio/GLM-4.5V-AWQ",
            "GLM-4.5V-FP8": "zai-org/GLM-4.5V-FP8",
            "GLM-4.5V": "zai-org/GLM-4.5V"
        }

        model_name = model_map[model_choice]

        # Load lazily and cache; reloading on every request would be very slow.
        if model_name not in models:
            pipe = pipeline(
                "image-text-to-text",
                model=model_name,
                device_map="auto",
                torch_dtype=torch.float16,
                trust_remote_code=True
            )
            models[model_name] = pipe
        else:
            pipe = models[model_name]

        # Single-turn chat message: the image plus the text instructions.
        messages = [{
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompts[prompt_style]}
            ]
        }]

        result = pipe(messages, max_new_tokens=512, temperature=0.7)

        # The pipeline normally returns a list of dicts; fall back to str().
        if isinstance(result, list) and len(result) > 0:
            generated_text = result[0].get("generated_text", str(result))
        else:
            generated_text = str(result)

        # Extract the fenced code from the response when present.
        code = generated_text.strip()
        if "```python" in code:
            start = code.find("```python") + 9
            end = code.find("```", start)
            if end > start:
                code = code[start:end].strip()

        # Guarantee the snippet is runnable on its own.
        if "import cadquery" not in code:
            code = "import cadquery as cq\n\n" + code

        # FIX: the returned markdown had lost its closing ``` fence (so the
        # whole report rendered as code) and hard-coded "Device: GPU".
        return f"""## 🎯 Generated CADQuery Code

```python
{code}
```

## πŸ“Š Info
- **Model**: {model_choice}
- **Prompt**: {prompt_style}
- **Device**: {"GPU" if torch.cuda.is_available() else "CPU"}

## πŸ”§ Usage
```bash
pip install cadquery
python your_script.py
```
"""

    except Exception as e:
        return f"❌ **Generation Failed**: {str(e)[:500]}"
96
def system_info():
    """Get system info - no GPU needed.

    Returns a markdown summary of CUDA availability, device count and the
    installed PyTorch version.
    """
    cuda_available = torch.cuda.is_available()
    # FIX: torch.version is a *module*; the version string is
    # torch.__version__ (the old f-string would have printed a module repr).
    info = f"""## πŸ–₯️ System Information

- CUDA Available: {cuda_available}
- CUDA Devices: {torch.cuda.device_count() if cuda_available else 0}
- PyTorch Version: {torch.__version__}
- Device: {"GPU" if cuda_available else "CPU"}
"""
    return info
106
 
107
# Create interface
# FIX: this comment had lost its leading '#' in the source (syntax error),
# and the entry-point guard below read `if name == "main":` instead of the
# required `if __name__ == "__main__":`.
with gr.Blocks(title="GLM-4.5V CAD Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# πŸ”§ GLM-4.5V CAD Generator

Generate CADQuery Python code from 3D CAD model images using GLM-4.5V models!

**Models**: GLM-4.5V-AWQ (fastest) | GLM-4.5V-FP8 (balanced) | GLM-4.5V (best quality)
""")

    with gr.Tab("πŸš€ Generate"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type="pil", label="Upload CAD Model Image")
                model_choice = gr.Dropdown(
                    choices=["GLM-4.5V-AWQ", "GLM-4.5V-FP8", "GLM-4.5V"],
                    value="GLM-4.5V-AWQ",
                    label="Select Model"
                )
                prompt_style = gr.Dropdown(
                    choices=["Simple", "Detailed", "Chain-of-Thought"],
                    value="Chain-of-Thought",
                    label="Prompt Style"
                )
                generate_btn = gr.Button("πŸš€ Generate CADQuery Code", variant="primary")

            with gr.Column():
                output = gr.Markdown("Upload an image and click Generate!")

        generate_btn.click(
            fn=generate_code,
            inputs=[image_input, model_choice, prompt_style],
            outputs=output
        )

    with gr.Tab("βš™οΈ System"):
        info_display = gr.Markdown()
        refresh_btn = gr.Button("πŸ”„ Refresh")

        # Populate on page load and on demand.
        demo.load(fn=system_info, outputs=info_display)
        refresh_btn.click(fn=system_info, outputs=info_display)

if __name__ == "__main__":
    print("πŸš€ Starting GLM-4.5V CAD Generator...")
    print(f"CUDA available: {torch.cuda.is_available()}")
    demo.launch()