update: added yolo v8 defense
- demo.html +3 -2
- mission_planner.py +3 -12
- mission_summarizer.py +3 -9
- models/detectors/yolov8_defence.py +12 -0
- models/model_loader.py +2 -0
- prompt.py +37 -0
demo.html
CHANGED

@@ -123,8 +123,9 @@ button:hover {
 
   <label class="label">OBJECT DETECTOR</label>
   <select id="detectorSelect">
-    <option value="owlv2" selected>OWL-V2
-    <option value="hf_yolov8">YOLOv8
+    <option value="owlv2" selected>OWL-V2</option>
+    <option value="hf_yolov8">YOLOv8</option>
+    <option value="hf_yolov8_defence">YOLOv8m Defence</option>
   </select>
 
   <button onclick="executeMission()">EXECUTE MISSION</button>
mission_planner.py
CHANGED

@@ -5,6 +5,7 @@ import logging
 from dataclasses import asdict, dataclass
 from typing import Dict, List, Tuple
 
+from prompt import mission_planner_system_prompt, mission_planner_user_prompt
 from utils.openai_client import get_openai_client
 
 

@@ -140,18 +141,8 @@ class MissionReasoner:
 
     def _query_llm(self, mission: str) -> Dict[str, object]:
         client = get_openai_client()
-        system_prompt = (
-            "You are a mission-planning assistant helping a vision system select which YOLO object classes to detect. "
-            "You must only reference the provided list of YOLO classes."
-        )
-        classes_blob = ", ".join(YOLO_CLASSES)
-        user_prompt = (
-            f"Mission: {mission}\n"
-            f"Available YOLO classes: {classes_blob}\n"
-            f"Return JSON with: mission (string) and classes (array). "
-            f"Each entry needs name, score (0-1 float), rationale. "
-            f"Limit to at most {self._top_k} classes. Only choose names from the list."
-        )
+        system_prompt = mission_planner_system_prompt()
+        user_prompt = mission_planner_user_prompt(mission, YOLO_CLASSES, self._top_k)
         completion = client.chat.completions.create(
             model=self._model_name,
             temperature=0.2,
mission_summarizer.py
CHANGED

@@ -5,6 +5,7 @@ import logging
 from typing import Any, Dict, List
 
 from mission_planner import MissionPlan
+from prompt import mission_summarizer_system_prompt, mission_summarizer_user_prompt
 from utils.openai_client import get_openai_client
 
 SUMMARY_MODEL = "gpt-4o-mini"

@@ -60,19 +61,12 @@ def summarize_results(
         ],
     }
 
-    system_prompt = (
-        "You are a surveillance analyst. Review structured detections aligned to a mission and summarize actionable "
-        "insights, highlighting objects of interest, temporal trends, and any security concerns. "
-        "Base conclusions solely on the provided data; if nothing is detected, explicitly state that."
-    )
+    system_prompt = mission_summarizer_system_prompt()
     messages = [
         {"role": "system", "content": system_prompt},
         {
             "role": "user",
-            "content": (
-                "Use this JSON to summarize the mission outcome:\n"
-                f"{json.dumps(payload, ensure_ascii=False)}"
-            ),
+            "content": mission_summarizer_user_prompt(json.dumps(payload, ensure_ascii=False)),
         },
     ]
 
models/detectors/yolov8_defence.py
ADDED

@@ -0,0 +1,12 @@
+from models.detectors.yolov8 import HuggingFaceYoloV8Detector
+
+
+class HuggingFaceYoloV8DefenceDetector(HuggingFaceYoloV8Detector):
+    """YOLOv8m detector fine-tuned on defence data hosted on Hugging Face."""
+
+    REPO_ID = "spencercdz/YOLOv8m_defence"
+    WEIGHT_FILE = "yolov8m_defence.pt"
+
+    def __init__(self, score_threshold: float = 0.3) -> None:
+        super().__init__(score_threshold=score_threshold)
+        self.name = "hf_yolov8_defence"
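The parent HuggingFaceYoloV8Detector is not part of this commit, so exactly how REPO_ID and WEIGHT_FILE are consumed isn't visible here. A minimal sketch of what such a base class could look like, assuming (not confirmed by this repo) that it resolves the checkpoint with huggingface_hub and runs inference through ultralytics; repo and file names in the defaults are hypothetical:

# Sketch only: assumes the base detector fetches REPO_ID / WEIGHT_FILE from the
# Hugging Face Hub and wraps an ultralytics YOLO model. Not the repo's actual code.
from huggingface_hub import hf_hub_download
from ultralytics import YOLO


class HuggingFaceYoloV8Detector:
    REPO_ID = "Ultralytics/YOLOv8"   # hypothetical default repo
    WEIGHT_FILE = "yolov8m.pt"       # hypothetical default weight file

    def __init__(self, score_threshold: float = 0.3) -> None:
        self.name = "hf_yolov8"
        self.score_threshold = score_threshold
        # Download (or reuse the cached copy of) the checkpoint from the Hub;
        # a subclass only needs to override REPO_ID / WEIGHT_FILE to swap weights.
        weights_path = hf_hub_download(repo_id=self.REPO_ID, filename=self.WEIGHT_FILE)
        self.model = YOLO(weights_path)

    def detect(self, image_path: str):
        # Run inference, keeping boxes above the configured confidence threshold.
        return self.model.predict(source=image_path, conf=self.score_threshold, verbose=False)

Under this reading, the new defence subclass is purely declarative: it overrides the class attributes and the display name, and inherits everything else.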
models/model_loader.py
CHANGED

@@ -5,12 +5,14 @@ from typing import Callable, Dict, Optional
 from models.detectors.base import ObjectDetector
 from models.detectors.owlv2 import Owlv2Detector
 from models.detectors.yolov8 import HuggingFaceYoloV8Detector
+from models.detectors.yolov8_defence import HuggingFaceYoloV8DefenceDetector
 
 DEFAULT_DETECTOR = "owlv2"
 
 _REGISTRY: Dict[str, Callable[[], ObjectDetector]] = {
     "owlv2": Owlv2Detector,
     "hf_yolov8": HuggingFaceYoloV8Detector,
+    "hf_yolov8_defence": HuggingFaceYoloV8DefenceDetector,
 }
 
 
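The hunk above only shows the registry itself; the function that consumes it is outside the diff. A rough usage sketch, assuming a hypothetical load_detector helper that looks keys up in _REGISTRY and falls back to DEFAULT_DETECTOR:

# Hypothetical helper: model_loader's real lookup function is not shown in this diff.
from typing import Optional

from models.detectors.base import ObjectDetector
from models.model_loader import _REGISTRY, DEFAULT_DETECTOR


def load_detector(name: Optional[str] = None) -> ObjectDetector:
    # Fall back to the default key when the UI sends nothing.
    key = name or DEFAULT_DETECTOR
    try:
        factory = _REGISTRY[key]
    except KeyError as exc:
        raise ValueError(f"Unknown detector '{key}'. Known: {sorted(_REGISTRY)}") from exc
    # Each registry value is a zero-argument callable returning a detector instance.
    return factory()


# The new <option> value in demo.html maps straight onto the new registry key:
detector = load_detector("hf_yolov8_defence")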
prompt.py
ADDED

@@ -0,0 +1,37 @@
+"""Centralized prompt builders for all LLM calls."""
+
+from __future__ import annotations
+
+from typing import Iterable
+
+
+def mission_planner_system_prompt() -> str:
+    return (
+        "You are a mission-planning assistant helping a vision system select which YOLO object "
+        "classes to detect. You must only reference the provided list of YOLO classes."
+    )
+
+
+def mission_planner_user_prompt(mission: str, available_classes: Iterable[str], top_k: int) -> str:
+    classes_blob = ", ".join(available_classes)
+    return (
+        f"Mission: {mission}\n"
+        f"Available YOLO classes: {classes_blob}\n"
+        "Return JSON with: mission (string) and classes (array). "
+        "Each entry needs name, score (0-1 float), rationale. "
+        f"Limit to at most {top_k} classes. Only choose names from the list."
+    )
+
+
+def mission_summarizer_system_prompt() -> str:
+    return (
+        "You are a surveillance analyst. Review structured detections aligned to a mission and "
+        "summarize actionable insights, highlighting objects of interest, temporal trends, and any "
+        "security concerns. Base conclusions solely on the provided data; if nothing is detected, "
+        "explicitly state that."
+    )
+
+
+def mission_summarizer_user_prompt(payload_json: str) -> str:
+    return "Use this JSON to summarize the mission outcome:\n" f"{payload_json}"
+
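Because the builders are plain functions with no side effects, they can be exercised directly. A small example of assembling a planner request from them; the mission string and class list below are made up for illustration (the real list comes from YOLO_CLASSES in mission_planner.py):

from prompt import mission_planner_system_prompt, mission_planner_user_prompt

# Illustrative inputs only.
mission = "Monitor the perimeter for approaching vehicles"
classes = ["car", "truck", "person", "boat"]

messages = [
    {"role": "system", "content": mission_planner_system_prompt()},
    {"role": "user", "content": mission_planner_user_prompt(mission, classes, top_k=3)},
]

for m in messages:
    print(m["role"], "->", m["content"][:80])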