import gradio as gr
import cv2
import numpy as np
from ultralytics import YOLO
import easyocr
import logging
import torch.serialization  # for the torch.load safe-globals allowlist

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Allowlist the YOLOv8 model class for PyTorch's weights-only loading
try:
    logger.info("Adding ultralytics.nn.tasks.DetectionModel to PyTorch safe globals")
    # add_safe_globals expects the class object itself, not its dotted path as a string
    from ultralytics.nn.tasks import DetectionModel
    torch.serialization.add_safe_globals([DetectionModel])
except Exception as e:
    logger.error("Failed to add safe globals: %s", str(e))
    raise

# Load the YOLOv8 model
try:
    logger.info("Loading YOLOv8 model")
    model = YOLO("yolov8n.pt")  # nano model for speed
    logger.info("YOLOv8 model loaded successfully")
except Exception as e:
    logger.error("Failed to load YOLOv8 model: %s", str(e))
    raise

# Load EasyOCR
try:
    logger.info("Loading EasyOCR")
    reader = easyocr.Reader(['en'], gpu=False)  # English only, CPU for the free tier
    logger.info("EasyOCR loaded successfully")
except Exception as e:
    logger.error("Failed to load EasyOCR: %s", str(e))
    raise


def analyze_image(image, prompt):
    logger.info("Starting image analysis with prompt: %s", prompt)

    # Convert the PIL image to NumPy/OpenCV format
    try:
        image_np = np.array(image)
        image_cv = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
        logger.info("Image shape: %s", image_np.shape)
    except Exception as e:
        logger.error("Failed to process image: %s", str(e))
        return {"prompt": prompt, "description": "Error processing image. Upload a valid image."}

    # Preprocessing: boost contrast with CLAHE for OCR; keep the original
    # colour image so candle colours (green/red) remain measurable
    image_color = image_cv.copy()
    try:
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
        enhanced = clahe.apply(gray)
        image_cv = cv2.cvtColor(enhanced, cv2.COLOR_GRAY2BGR)
        logger.info("Image preprocessing completed")
    except Exception as e:
        logger.warning("Failed to preprocess image: %s", str(e))

    # General image description
    if "what do you see" in prompt.lower() or "was siehst du" in prompt.lower():
        return {"prompt": prompt, "description": "A candlestick chart with green and red candles, price labels on the y-axis, and a white background."}

    # Candlestick analysis
    elif "last 8 candles" in prompt.lower() or "letzte 8 kerzen" in prompt.lower():
        try:
            # YOLOv8 for candle detection
            results = model.predict(source=image_np, conf=0.3, iou=0.5)
            detections = []
            for r in results:
                boxes = r.boxes.xyxy.cpu().numpy()
                labels = r.boxes.cls.cpu().numpy()
                for box, label in zip(boxes, labels):
                    # Filter for candles (YOLOv8 has no candle class, so this is a rough heuristic)
                    xmin, ymin, xmax, ymax = map(int, box)
                    width = xmax - xmin
                    height = ymax - ymin
                    # Check whether the box looks like a candle (narrow and tall)
                    if width > 0 and height / width > 2:  # aspect-ratio threshold for candles
                        candle_roi = image_color[ymin:ymax, xmin:xmax]
                        if candle_roi.size == 0:
                            logger.warning("Empty ROI for box: (%d, %d, %d, %d)", xmin, ymin, xmax, ymax)
                            continue
                        mean_color = np.mean(candle_roi, axis=(0, 1)).astype(int)
                        color_rgb = f"RGB({mean_color[2]},{mean_color[1]},{mean_color[0]})"
                        # OCR for prices on the contrast-enhanced image (expanded ROI around the candle)
                        price_roi = image_cv[max(0, ymin - 200):min(image_np.shape[0], ymax + 200),
                                             max(0, xmin - 200):min(image_np.shape[1], xmax + 200)]
                        ocr_results = reader.readtext(price_roi, detail=0)
                        prices = " ".join(ocr_results) if ocr_results else "No price detected"
                        detections.append({
                            "pattern": "Candle",
                            "color": color_rgb,
                            "prices": prices,
                            "x_center": (xmin + xmax) / 2
                        })

            # Sort by x position (rightmost = most recent candles) and keep the last 8
            detections = sorted(detections, key=lambda x: x["x_center"], reverse=True)[:8]
            logger.info("Sorted detections: %d", len(detections))
            if not detections:
                logger.warning("No candlesticks detected. Ensure clear image with visible candles.")
                return {"prompt": prompt, "description": "No candlesticks detected. Try a clearer screenshot with visible candles and prices."}
            return {"prompt": prompt, "detections": detections}
        except Exception as e:
            logger.error("Failed to analyze candles: %s", str(e))
            return {"prompt": prompt, "description": "Error analyzing candles. Try a clearer screenshot with visible candles and prices."}

    else:
        return {"prompt": prompt, "description": "Unsupported prompt. Use 'Was siehst du auf dem Bild?' or 'List last 8 candles with their colors'."}


# Build the Gradio interface
iface = gr.Interface(
    fn=analyze_image,
    inputs=[
        gr.Image(type="pil", label="Upload an Image"),
        gr.Textbox(label="Prompt", placeholder="Enter your prompt, e.g., 'Was siehst du auf dem Bild?' or 'List last 8 candles with their colors'")
    ],
    outputs="json",
    title="Chart Analysis with YOLOv8 and EasyOCR",
    description="Upload a TradingView screenshot to analyze candlesticks or get a general description."
)
iface.launch()