import cv2
import numpy as np
import onnxruntime as ort

# --- CONFIGURATION ---
MODEL_PATH = "meiki.text.detect.small.v0.onnx"
INPUT_IMAGE_PATH = "input.jpg"
OUTPUT_IMAGE_PATH = "output.small.jpg"

# The model expects a 640x640 RGB image.
MODEL_SIZE = 640

# A threshold to filter out weak detections.
# You can adjust this value (e.g., lower to 0.3 for more boxes,
# or raise to 0.5 for fewer, more confident boxes).
CONFIDENCE_THRESHOLD = 0.4


def resize_and_pad(image: np.ndarray, size: int):
    """
    Resizes a COLOR image to the model's expected size, maintaining
    aspect ratio and padding the remainder.

    Returns:
        - The padded image, ready for the model.
        - The ratio used to resize the image.
        - The padding amounts (pad_w, pad_h).
    """
    # Get the original image dimensions.
    original_height, original_width, _ = image.shape

    # Calculate the ratio to resize the image.
    ratio = min(size / original_width, size / original_height)
    new_width = int(original_width * ratio)
    new_height = int(original_height * ratio)

    # Resize the image using the calculated ratio.
    resized_image = cv2.resize(image, (new_width, new_height),
                               interpolation=cv2.INTER_LINEAR)

    # Create a new square image (640x640) filled with zeros (black).
    # Note the `(size, size, 3)` for the 3 color channels (BGR).
    padded_image = np.zeros((size, size, 3), dtype=np.uint8)

    # Calculate padding to center the resized image.
    pad_w = (size - new_width) // 2
    pad_h = (size - new_height) // 2

    # Paste the resized image onto the center of the black square.
    padded_image[pad_h:pad_h + new_height, pad_w:pad_w + new_width] = resized_image

    return padded_image, ratio, pad_w, pad_h


def main():
    """
    Main function to run the inference process.
    """
    # --- 1. Load the Model ---
    try:
        # Create an inference session with the ONNX model.
        session = ort.InferenceSession(MODEL_PATH, providers=['CPUExecutionProvider'])
        print(f"Successfully loaded model: {MODEL_PATH}")
    except Exception as e:
        print(f"Error: Failed to load the ONNX model. Make sure '{MODEL_PATH}' exists.")
        print(f"Details: {e}")
        return

    # --- 2. Load and Pre-process the Input Image ---
    try:
        # Read the input image from the file. It will be in BGR format by default.
        original_image = cv2.imread(INPUT_IMAGE_PATH)
        if original_image is None:
            raise FileNotFoundError(f"Image not found at '{INPUT_IMAGE_PATH}'")
        print(f"Successfully loaded image: {INPUT_IMAGE_PATH}")
    except Exception as e:
        print(f"Error: {e}")
        return

    # This model requires a color image, so we don't convert to grayscale.
    # Resize and pad the image to fit the model's 640x640 input size.
    padded_image, ratio, pad_w, pad_h = resize_and_pad(original_image, MODEL_SIZE)

    # The model expects RGB input (see CONFIGURATION above), but OpenCV loads
    # images in BGR order, so swap the channels before normalizing.
    rgb_image = cv2.cvtColor(padded_image, cv2.COLOR_BGR2RGB)

    # Normalize the image data to be between 0 and 1.
    img_normalized = rgb_image.astype(np.float32) / 255.0

    # The model expects the channel dimension to be first (Channels, Height, Width).
    # OpenCV loads images as (Height, Width, Channels), so we transpose the axes.
    img_transposed = np.transpose(img_normalized, (2, 0, 1))

    # Add a batch dimension to match the model's expected input shape: (1, 3, 640, 640).
    image_input_tensor = np.expand_dims(img_transposed, axis=0)

    # --- 3. Run Inference ---
    # The model requires a second input specifying the image size. We provide the padded size.
    sizes_input_tensor = np.array([[MODEL_SIZE, MODEL_SIZE]], dtype=np.int64)

    # Get the names of the model's input nodes.
    input_names = [inp.name for inp in session.get_inputs()]

    # Prepare the dictionary of inputs for the model.
    inputs = {
        input_names[0]: image_input_tensor,
        input_names[1]: sizes_input_tensor,
    }

    # Run the model.
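    # Note (assumption): the shapes assumed below, labels (1, N), boxes
    # (1, N, 4) as (x_min, y_min, x_max, y_max) pixel coordinates in the
    # padded 640x640 frame, and scores (1, N), follow the common detector
    # convention. If unpacking fails, inspect session.get_outputs() to
    # confirm the order and shapes for your model build.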
    # This model returns three separate outputs: labels, boxes, and confidence scores.
    outputs = session.run(None, inputs)
    labels, boxes, scores = outputs

    # --- 4. Post-process and Draw Bounding Boxes ---
    # The outputs have an extra batch dimension, so we remove it.
    boxes = boxes[0]
    scores = scores[0]

    print(f"Model returned {len(boxes)} boxes. Filtering with confidence > {CONFIDENCE_THRESHOLD}...")

    # Create a copy of the original image to draw on.
    output_image = original_image.copy()

    # Iterate through the boxes and their corresponding scores.
    confident_boxes_count = 0
    for box, score in zip(boxes, scores):
        # Only process boxes with a confidence score above our threshold.
        if score > CONFIDENCE_THRESHOLD:
            confident_boxes_count += 1

            # The coordinates from the model are relative to the 640x640 padded image.
            # We need to scale them back to the original image's coordinate space.
            x_min, y_min, x_max, y_max = box

            # Step 1: Subtract the padding that was added.
            x_min_unpadded = x_min - pad_w
            y_min_unpadded = y_min - pad_h
            x_max_unpadded = x_max - pad_w
            y_max_unpadded = y_max - pad_h

            # Step 2: Scale the coordinates back up to the original image size
            # by dividing by the ratio.
            final_x_min = int(x_min_unpadded / ratio)
            final_y_min = int(y_min_unpadded / ratio)
            final_x_max = int(x_max_unpadded / ratio)
            final_y_max = int(y_max_unpadded / ratio)

            # Draw a green rectangle on the output image.
            cv2.rectangle(output_image, (final_x_min, final_y_min),
                          (final_x_max, final_y_max), (0, 255, 0), 2)

    print(f"Found {confident_boxes_count} confident boxes.")

    # --- 5. Save the Final Image ---
    cv2.imwrite(OUTPUT_IMAGE_PATH, output_image)
    print(f"Successfully saved result to: {OUTPUT_IMAGE_PATH}")


if __name__ == "__main__":
    main()
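
# Usage: place the model file and input image (the constants at the top of
# this file) next to this script, then run it directly with Python, e.g.
# `python detect.py` if saved under that (hypothetical) name. The script
# draws green boxes for detections above CONFIDENCE_THRESHOLD and writes
# the result to OUTPUT_IMAGE_PATH.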