Update README.md
README.md (CHANGED)

@@ -29,34 +29,32 @@ The model was trained for 100 epochs with a batch size of 16 using dual NVIDIA T
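
For context, a minimal sketch of what a training run with that configuration might look like in Ultralytics; the dataset YAML (`text-data.yaml`) and the `yolo11x.pt` starting checkpoint are assumptions for illustration, not files from this repo:

```python
from ultralytics import YOLO

# Hypothetical reproduction of the stated setup: 100 epochs, batch size 16, two GPUs.
model = YOLO("yolo11x.pt")        # assumed base checkpoint
model.train(
    data="text-data.yaml",        # assumed dataset config for the text-detection data
    epochs=100,
    batch=16,
    device=[0, 1],                # dual-GPU training
)
```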

# Example Usage

```python
# Load libraries
import cv2
from ultralytics import YOLO
from pathlib import Path
import matplotlib.pyplot as plt
from huggingface_hub import hf_hub_download


# Download model
model_path = hf_hub_download(repo_id="Daniil-Domino/yolo11x-text-detection", filename="model.pt")

# Load model
model = YOLO(model_path)

# Inference
image_path = "/path/to/image"
image = cv2.imread(image_path).copy()
output = model.predict(image, conf=0.3)

# Draw bounding boxes
out_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
for data in output[0].boxes.data.tolist():
    # each detection is [xmin, ymin, xmax, ymax, confidence, class_id]
    xmin, ymin, xmax, ymax, _, _ = map(int, data)
    cv2.rectangle(out_image, (xmin, ymin), (xmax, ymax), color=(0, 0, 255), thickness=3)

# Display result
plt.figure(figsize=(15, 10))
plt.imshow(out_image)
plt.axis('off')
plt.show()
```
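
As a side note, Ultralytics results can also render the annotations themselves; a minimal sketch reusing the `output` object from the snippet above (the output filename is arbitrary):

```python
# plot() returns the input image as a BGR numpy array with boxes drawn on it.
annotated = output[0].plot()
cv2.imwrite("annotated.jpg", annotated)  # hypothetical output path
```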

# Metrics
Below are the key evaluation metrics on the validation set:

- **Precision**: 0.929
- **Recall**: 0.937
- **mAP50**: 0.966
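
These figures could in principle be reproduced with Ultralytics' built-in evaluation; a minimal sketch, assuming a dataset YAML (hypothetically `text-data.yaml`) that points at the validation split:

```python
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

# Download the checkpoint and evaluate it on the validation split.
model_path = hf_hub_download(repo_id="Daniil-Domino/yolo11x-text-detection", filename="model.pt")
model = YOLO(model_path)

metrics = model.val(data="text-data.yaml")                 # assumed dataset config
print(metrics.box.mp, metrics.box.mr, metrics.box.map50)   # precision, recall, mAP50
```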