# app_yolo11_blur_nomargin.py
from pathlib import Path
import cv2
import numpy as np
import os, time, tempfile
import gradio as gr
from ultralytics import YOLO
from huggingface_hub import hf_hub_download
from PIL import Image

# ========= CONFIG =========
DEVICE = "cpu"  # "cpu", or "cuda:0" if a GPU is available
CONF_DEFAULT = 0.30
IOU_DEFAULT = 0.50
BLUR_DEFAULT = 0.28
FEATHER_DEFAULT = 8

weights_path = hf_hub_download(
    repo_id="morsetechlab/yolov11-license-plate-detection",
    filename="license-plate-finetune-v1l.pt"
)
model = YOLO(weights_path)
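
# Quick standalone sanity check (a sketch, not executed here; "car.jpg" is a hypothetical local file):
#   print(model.predict(source="car.jpg", conf=CONF_DEFAULT, verbose=False)[0].boxes.xyxy)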

# ========= MARGIN-FREE BLUR (soft inner edge) =========
def blur_bbox_nomargin(img, x1, y1, x2, y2, blur_strength=BLUR_DEFAULT, feather=FEATHER_DEFAULT):
    H, W = img.shape[:2]
    x1, y1 = max(0, x1), max(0, y1)
    x2, y2 = min(W, x2), min(H, y2)
    w, h = x2 - x1, y2 - y1
    if w <= 0 or h <= 0:
        return img
    # blur the whole frame, then composite it back inside the bbox via a mask (no margin)
    k = int(max(w, h) * blur_strength)
    k = max(31, (k // 2) * 2 + 1)  # odd kernel size, minimum 31 => smooth blur
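    # Example: a 200 px-wide box with blur_strength 0.28 gives k = int(200 * 0.28) = 56,
    # which the line above rounds to the odd value 57 (and never below 31).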
    blurred_full = cv2.GaussianBlur(img, (k, k), 0)
    mask = np.zeros((H, W), dtype=np.uint8)
    cv2.rectangle(mask, (x1, y1), (x2, y2), 255, thickness=-1)
    if feather > 0:
        # feather toward the inside of the rectangle (never spills past its edges)
        obj = (mask > 0).astype(np.uint8)
        dist = cv2.distanceTransform(obj, distanceType=cv2.DIST_L2, maskSize=3)
        alpha = np.clip(dist / float(feather), 0.0, 1.0)
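        # Example: with feather=8, a pixel about 4 px inside the rectangle edge gets alpha ≈ 0.5
        # (half-blurred), pixels 8 px or deeper inside are fully blurred (alpha = 1.0),
        # and pixels outside the rectangle keep alpha = 0 (untouched).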
    else:
        alpha = mask.astype(np.float32) / 255.0
    alpha = alpha[..., None]
    out = (alpha * blurred_full + (1.0 - alpha) * img).astype(np.uint8)
    return out
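
# Standalone usage sketch (not executed here; "car.jpg" and the box coordinates are
# hypothetical illustration values):
#   img = cv2.imread("car.jpg")
#   img = blur_bbox_nomargin(img, 120, 340, 280, 390, blur_strength=0.28, feather=8)
#   cv2.imwrite("car_blurred.jpg", img)
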
# ========= PIPELINE (with UI step toggling) =========
def process_and_toggle(image):
    """
    Generator that:
    1) hides step 1 and shows step 2 right away
    2) runs detection + blur, then yields the preview image and the .jpg download file
    """
    # 1) immediate UI toggle
    yield (None, None, gr.update(visible=False), gr.update(visible=True))
    # 2) processing
    if image is None:
        yield (None, None, gr.update(visible=True), gr.update(visible=False))
        return

    bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    results = model.predict(source=bgr, conf=CONF_DEFAULT, iou=IOU_DEFAULT, device=DEVICE, verbose=False)

    out = bgr.copy()
    total = 0
    for r in results:
        if r.boxes is None:
            continue
        for (x1, y1, x2, y2) in r.boxes.xyxy.cpu().numpy().astype(int):
            # effective feather capped at half the bbox size (avoids artifacts)
            w, h = x2 - x1, y2 - y1
            f_eff = min(int(FEATHER_DEFAULT), max(w // 2 - 1, 0), max(h // 2 - 1, 0))
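            # Example: for a 40x16 px box, f_eff = min(8, 19, 7) = 7, so the fade never
            # exceeds half of the shorter side.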
            out = blur_bbox_nomargin(out, x1, y1, x2, y2,
                                     blur_strength=float(BLUR_DEFAULT),
                                     feather=f_eff)
            total += 1

    out_rgb = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)

    # save the result to a temporary .jpg file for download
    ts = time.strftime("%Y%m%d_%H%M%S")
    fname = f"plaque_floutee_{ts}.jpg"
    fpath = os.path.join(tempfile.gettempdir(), fname)
    Image.fromarray(out_rgb).save(fpath, format="JPEG", quality=92)

    yield (out_rgb, fpath, gr.update(visible=False), gr.update(visible=True))
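
# Note: because process_and_toggle is a generator, Gradio streams each yield as an
# intermediate update, so the step-2 view appears before detection finishes.
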
def reset_ui():
    # reset the display (back to step 1)
    return None, None, gr.update(visible=True), gr.update(visible=False)

# ========= UI =========
with gr.Blocks(title="🚗 License plate blurring") as demo:
    gr.Markdown("### 📸 Drop an image 🟦 Click **Blur** 💾 Download your car with its license plate blurred")

    with gr.Column(visible=True) as step1:
        with gr.Row():
            with gr.Column():
                inp = gr.Image(type="numpy", label="Image")
                btn = gr.Button("Blur 🔒")

    with gr.Column(visible=False) as step2:
        out_img = gr.Image(type="numpy", label="Blurred preview 👀")
        out_file = gr.File(label="Download as JPG 💾")
        back_btn = gr.Button("Start over 🔄")

    # click: the UI toggle is streamed first, then the image/file
    btn.click(
        fn=process_and_toggle,
        inputs=[inp],
        outputs=[out_img, out_file, step1, step2],
        queue=True,  # enables streaming the generator's yields
    )

    # back button
    back_btn.click(
        fn=reset_ui,
        inputs=[],
        outputs=[out_img, out_file, step1, step2],
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", "7860")))
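
# Local run sketch (package list inferred from the imports above; versions are not pinned here):
#   pip install ultralytics gradio opencv-python-headless huggingface_hub pillow numpy
#   python app_yolo11_blur_nomargin.py
# The app listens on port 7860 unless the PORT environment variable overrides it.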