import gradio as gr
import numpy as np
from PIL import Image
import cv2
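# Deployment note (assumption about the hosting environment): when run as a
# Hugging Face Space, requirements.txt should list gradio, numpy, pillow, and
# opencv-python-headless -- the headless build avoids the libGL import error
# that plain opencv-python hits on server containers without a display.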
class VirtualTryOnRoom:
    def __init__(self):
        self.person_img = None
        self.cloth_img = None
        self.result_img = None

    def preprocess_image(self, image, target_size=(512, 512)):
        """Resize an uploaded image to a consistent working size."""
        if image is None:
            return None
        # Defensive: force 3-channel RGB, since an upload carrying an alpha
        # channel would break the RGB2GRAY-style processing downstream
        image = Image.fromarray(image).convert("RGB")
        image = image.resize(target_size, Image.Resampling.LANCZOS)
        return np.array(image)
    def load_person_image(self, image):
        """Load and preprocess the person image."""
        if image is None:
            return None, "Please upload a person image first!"
        self.person_img = self.preprocess_image(image)
        return self.person_img, (
            f"Person image loaded successfully! "
            f"Size: {self.person_img.shape[1]}x{self.person_img.shape[0]}"
        )

    def load_cloth_image(self, image):
        """Load and preprocess the clothing image."""
        if image is None:
            return None, "Please upload a clothing image first!"
        self.cloth_img = self.preprocess_image(image)
        return self.cloth_img, (
            f"Clothing image loaded successfully! "
            f"Size: {self.cloth_img.shape[1]}x{self.cloth_img.shape[0]}"
        )
    def extract_person_mask(self, image):
        """Approximate the person region with a soft elliptical mask.

        This is a placeholder heuristic: it assumes the subject is roughly
        centered in the frame and draws a blurred ellipse there, rather than
        performing any actual person detection.
        """
        height, width = image.shape[:2]
        mask = np.zeros((height, width), dtype=np.uint8)
        center = (width // 2, height // 2)
        axes = (width // 3, height // 2)
        cv2.ellipse(mask, center, axes, 0, 0, 360, 255, -1)
        # Gaussian blur softens the mask edges for a smoother blend
        mask = cv2.GaussianBlur(mask, (51, 51), 0)
        return mask.astype(np.float32) / 255.0
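    # Obvious upgrade path (a suggestion, not part of this app): replace the
    # ellipse heuristic with a real segmentation mask, e.g. from the rembg
    # package or MediaPipe's selfie-segmentation model, and keep the rest of
    # the blending pipeline unchanged.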
    def simple_virtual_tryon(self):
        """Overlay the clothing image onto the person image."""
        if self.person_img is None or self.cloth_img is None:
            return None, "Please upload both person and clothing images first!"
        try:
            person_copy = self.person_img.copy()
            cloth_copy = self.cloth_img.copy()

            # Scale the clothing to the person's width; the 0.8 factor keeps
            # the garment slightly smaller than the frame
            cloth_height, cloth_width = cloth_copy.shape[:2]
            person_height, person_width = person_copy.shape[:2]
            scale_width = person_width / cloth_width
            target_height = int(cloth_height * scale_width * 0.8)
            cloth_resized = cv2.resize(cloth_copy, (person_width, target_height))

            person_mask = self.extract_person_mask(person_copy)
            result = person_copy.copy()

            # Position the clothing over the upper body (simple fixed offset)
            start_y = person_height // 6
            end_y = start_y + target_height
            start_x, end_x = 0, person_width

            if end_y <= person_height and end_x <= person_width:
                # Alpha-blend each color channel through the person mask
                for c in range(3):
                    cloth_region = cloth_resized[:, :, c]
                    person_region = person_copy[start_y:end_y, start_x:end_x, c]
                    mask_region = person_mask[start_y:end_y, start_x:end_x]
                    blended = (
                        person_region.astype(np.float32) * (1 - 0.7 * mask_region)
                        + cloth_region.astype(np.float32) * (0.7 * mask_region)
                    )
                    result[start_y:end_y, start_x:end_x, c] = np.clip(
                        blended, 0, 255
                    ).astype(np.uint8)

            self.result_img = result
            return result, "Virtual try-on completed! Use the sliders below for fine-tuning."
        except Exception as e:
            return None, f"Error during virtual try-on: {str(e)}"
    def adjust_opacity(self, opacity):
        """Re-blend the clothing overlay at a user-chosen opacity."""
        if self.person_img is None or self.result_img is None:
            return None, "Please complete the virtual try-on first!"
        try:
            result = self.person_img.copy()
            cloth_copy = self.cloth_img.copy()

            # Recompute the same geometry as the initial try-on
            cloth_height, cloth_width = cloth_copy.shape[:2]
            person_height, person_width = self.person_img.shape[:2]
            scale_width = person_width / cloth_width
            target_height = int(cloth_height * scale_width * 0.8)
            cloth_resized = cv2.resize(cloth_copy, (person_width, target_height))
            person_mask = self.extract_person_mask(self.person_img)

            start_y = person_height // 6
            end_y = start_y + target_height
            start_x, end_x = 0, person_width

            if end_y <= person_height and end_x <= person_width:
                for c in range(3):
                    cloth_region = cloth_resized[:, :, c]
                    person_region = self.person_img[start_y:end_y, start_x:end_x, c]
                    mask_region = person_mask[start_y:end_y, start_x:end_x]
                    # Same blend as the initial try-on, with adjustable opacity
                    blended = (
                        person_region.astype(np.float32) * (1 - opacity * mask_region)
                        + cloth_region.astype(np.float32) * (opacity * mask_region)
                    )
                    result[start_y:end_y, start_x:end_x, c] = np.clip(
                        blended, 0, 255
                    ).astype(np.uint8)

            return result, f"Opacity adjusted to {opacity:.2f}"
        except Exception as e:
            return None, f"Error adjusting opacity: {str(e)}"
    def adjust_position(self, vertical_offset):
        """Re-blend the clothing overlay at a user-chosen vertical offset."""
        if self.person_img is None or self.cloth_img is None:
            return None, "Please upload both person and clothing images first!"
        try:
            result = self.person_img.copy()
            cloth_copy = self.cloth_img.copy()

            cloth_height, cloth_width = cloth_copy.shape[:2]
            person_height, person_width = self.person_img.shape[:2]
            scale_width = person_width / cloth_width
            target_height = int(cloth_height * scale_width * 0.8)
            cloth_resized = cv2.resize(cloth_copy, (person_width, target_height))
            person_mask = self.extract_person_mask(self.person_img)

            # Shift the start row by the slider value, scaled to pixels
            base_y = person_height // 6
            start_y = max(0, base_y + int(vertical_offset * 50))
            end_y = start_y + target_height
            start_x, end_x = 0, person_width

            # Fall back to the default placement if the offset pushed the
            # overlay entirely off-canvas
            if start_y >= person_height or end_y <= 0:
                start_y = max(0, base_y)
                end_y = min(person_height, start_y + target_height)

            for c in range(3):
                if end_y > 0 and start_y < person_height:
                    # Clip the clothing strip to the visible part of the canvas
                    cloth_y_start = max(0, -start_y)
                    cloth_y_end = min(target_height, person_height - start_y)
                    person_y_start = max(0, start_y)
                    person_y_end = min(person_height, end_y)
                    if cloth_y_end > cloth_y_start and person_y_end > person_y_start:
                        cloth_region = cloth_resized[cloth_y_start:cloth_y_end, :, c]
                        person_region = self.person_img[person_y_start:person_y_end, start_x:end_x, c]
                        mask_region = person_mask[person_y_start:person_y_end, start_x:end_x]
                        blended = (
                            person_region.astype(np.float32) * (1 - 0.7 * mask_region)
                            + cloth_region.astype(np.float32) * (0.7 * mask_region)
                        )
                        result[person_y_start:person_y_end, start_x:end_x, c] = np.clip(
                            blended, 0, 255
                        ).astype(np.uint8)

            return result, f"Position adjusted (vertical offset: {vertical_offset:.2f})"
        except Exception as e:
            return None, f"Error adjusting position: {str(e)}"
    def reset_app(self):
        """Reset the application state."""
        self.person_img = None
        self.cloth_img = None
        self.result_img = None
        return None, None, None, "Application reset! Please upload new images to start."


tryon_app = VirtualTryOnRoom()
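# Caveat: this module-level instance is shared by every visitor to a deployed
# Gradio app, so concurrent users would overwrite each other's images.
# Per-session state via gr.State would be the usual fix for a public demo.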
# Create the Gradio interface
with gr.Blocks(title="Virtual Cloth Trial Room", theme=gr.themes.Soft()) as demo:
    # Header
    gr.HTML("""
    <div style="text-align: center; padding: 20px; background: linear-gradient(45deg, #667eea 0%, #764ba2 100%); border-radius: 10px; margin-bottom: 20px;">
        <h1 style="color: white; margin: 0; font-size: 2.5em;">Virtual Cloth Trial Room</h1>
        <p style="color: white; margin: 10px 0; font-size: 1.2em;">Try on clothes virtually with AI-powered technology</p>
        <div style="color: white; margin-top: 15px;">
            <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #ffeb3b; text-decoration: none; font-weight: bold;">Built with anycoder</a>
        </div>
    </div>
    """)

    # Instructions
    gr.HTML("""
    <div style="background: #f8f9fa; padding: 20px; border-radius: 10px; margin-bottom: 20px; border-left: 5px solid #667eea;">
        <h3 style="color: #333; margin-top: 0;">How to use:</h3>
        <ol style="color: #555; line-height: 1.6;">
            <li>Upload a photo of yourself (preferably standing upright)</li>
            <li>Upload an image of the clothing you want to try on</li>
            <li>Click "Try On" to see the virtual try-on result</li>
            <li>Use the sliders below to adjust opacity and position</li>
        </ol>
        <p style="color: #777; font-size: 0.9em; margin-bottom: 0;">
            <strong>Note:</strong> This is a simplified demo. For best results, use clear photos with good lighting.
        </p>
    </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            gr.HTML("<h3 style='text-align: center; color: #333;'>Upload Person Photo</h3>")
            person_input = gr.Image(label="Person Image", type="numpy", height=400)
            person_status = gr.Textbox(label="Status", interactive=False)
        with gr.Column(scale=1):
            gr.HTML("<h3 style='text-align: center; color: #333;'>Upload Clothing</h3>")
            cloth_input = gr.Image(label="Clothing Image", type="numpy", height=400)
            cloth_status = gr.Textbox(label="Status", interactive=False)

    with gr.Row():
        with gr.Column(scale=1):
            tryon_button = gr.Button("🛍️ Try On Clothing", variant="primary", size="lg")
        with gr.Column(scale=1):
            reset_button = gr.Button("🔄 Reset", variant="secondary", size="lg")

    with gr.Row():
        with gr.Column():
            gr.HTML("<h3 style='text-align: center; color: #333;'>Result</h3>")
            result_output = gr.Image(label="Virtual Try-On Result", height=500)
            result_status = gr.Textbox(label="Status", interactive=False)

    # Adjustment controls
    with gr.Row():
        with gr.Column():
            gr.HTML("<h4 style='text-align: center; color: #333;'>Adjustments</h4>")
            opacity_slider = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.05,
                label="Clothing Opacity",
                info="Adjust how transparent the clothing appears",
            )
        with gr.Column():
            position_slider = gr.Slider(
                minimum=-2.0,
                maximum=2.0,
                value=0.0,
                step=0.1,
                label="Vertical Position",
                info="Move clothing up or down",
            )
    # Image loading handlers. `upload` is used instead of `change` because the
    # handlers write the preprocessed image back into the same component, and a
    # `change` listener would re-fire on that programmatic update.
    person_input.upload(
        fn=tryon_app.load_person_image,
        inputs=person_input,
        outputs=[person_input, person_status]
    )
    cloth_input.upload(
        fn=tryon_app.load_cloth_image,
        inputs=cloth_input,
        outputs=[cloth_input, cloth_status]
    )
    # Button handlers
    tryon_button.click(
        fn=tryon_app.simple_virtual_tryon,
        outputs=[result_output, result_status]
    )
    opacity_slider.change(
        fn=tryon_app.adjust_opacity,
        inputs=opacity_slider,
        outputs=[result_output, result_status]
    )
    position_slider.change(
        fn=tryon_app.adjust_position,
        inputs=position_slider,
        outputs=[result_output, result_status]
    )
    reset_button.click(
        fn=tryon_app.reset_app,
        outputs=[person_input, cloth_input, result_output, result_status]
    )
    # Footer
    gr.HTML("""
    <div style="text-align: center; padding: 20px; background: #f1f3f4; border-radius: 10px; margin-top: 30px;">
        <p style="color: #666; margin: 0;">
            This virtual try-on system uses computer vision and image processing techniques to simulate clothing try-on.
            <br>
            <strong>Tip:</strong> For better results, use high-quality images with clear contrast between person and background.
        </p>
    </div>
    """)
if __name__ == "__main__":
    # share=True is unnecessary on Hugging Face Spaces (it is ignored there
    # with a warning); a plain launch() works both locally and on Spaces.
    demo.launch()