Update app.py
app.py
CHANGED
@@ -1494,7 +1494,7 @@ class Hive:
 
         if self.lite_mode:
             prompt = f"<|user|>\n{message}</s>\n<|assistant|>\n"
-            full_reply = "".join(list(self.chat_stream(prompt, max_new_tokens, temp)))
+            full_reply = "".join(list(self.chat_stream(prompt, max_new_tokens=max_new_tokens, temperature=temp)))
             return full_reply
 
         kk = k if k is not None else (self.retrieval_k if hasattr(self, 'retrieval_k') else 6)
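The lite-mode path now hands the generation settings to `chat_stream` as keyword arguments instead of positionally. If the parameter order of `chat_stream` ever differs from the call site, positional values silently bind to the wrong parameters; keywords make the intent explicit. A minimal sketch of the hazard, using a hypothetical signature (the real `chat_stream` signature is not shown in this diff):

```python
# Hypothetical signature, used only to illustrate the positional-binding hazard;
# the real chat_stream signature is not part of this diff.
def chat_stream(prompt, temperature=0.7, max_new_tokens=256):
    yield f"[temperature={temperature}, max_new_tokens={max_new_tokens}] "
    yield prompt

# Positional call: 512 binds to `temperature` and 0.2 to `max_new_tokens`.
print("".join(chat_stream("hi", 512, 0.2)))

# Keyword call: each value reaches the parameter it was meant for.
print("".join(chat_stream("hi", max_new_tokens=512, temperature=0.2)))
```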
@@ -1665,7 +1665,7 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
 
         if hive_instance.lite_mode:
             # Lite mode: direct, non-streaming response.
-            reply = hive_instance.chat(sanitized_m, eff, current_user_id)
+            reply = hive_instance.chat(sanitized_m, eff, current_user_id, max_new_tokens=256, temperature=0.7)
             messages_hist.append({"role": "assistant", "content": reply or "[No response from model]"})
             yield messages_hist, gr.Textbox(value="", interactive=True)
         else:
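In lite mode the handler computes the whole reply in one blocking call and yields the updated history once, while the full-core path in the `else:` branch (not shown in this hunk) streams. A minimal sketch of the two response shapes, assuming the handler is a generator consumed by the UI; all names below are illustrative stand-ins, not the app's real API:

```python
from typing import Iterator

def lite_reply(message: str) -> str:
    # Stand-in for the blocking hive_instance.chat(...) call.
    return f"echo: {message}"

def stream_reply(message: str) -> Iterator[str]:
    # Stand-in for a token/chunk stream from the full core.
    yield from f"echo: {message}"

def respond(message: str, history: list, lite_mode: bool) -> Iterator[list]:
    if lite_mode:
        # Non-streaming: a single yield with the completed assistant turn.
        history.append({"role": "assistant", "content": lite_reply(message) or "[No response from model]"})
        yield history
    else:
        # Streaming: re-yield the history as the assistant turn grows.
        history.append({"role": "assistant", "content": ""})
        for chunk in stream_reply(message):
            history[-1]["content"] += chunk
            yield history

for h in respond("hello", [], lite_mode=True):
    print(h[-1]["content"])
```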
@@ -1782,7 +1782,7 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
         """Waits for ASR/TTS models and enables voice-related UI elements."""
         bootstrap_instance.voice_ready.wait()
         bootstrap_instance.hive_ready.wait()  # Also wait for full core for voice features
-        hive_instance = get_hive_instance()
+        hive_instance = get_hive_instance(bootstrap_instance)
 
         voice_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'asr_service') and hasattr(hive_instance, 'tts_service')
         video_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'video_service') and CFG["VIDEO_ENABLED"]  # type: ignore
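This hunk and the two that follow switch UI callbacks from a no-argument `get_hive_instance()` to `get_hive_instance(bootstrap_instance)`, the module-level accessor added near line 2100 in this same commit. Because these callbacks are defined inside `launch_ui(bootstrap_instance: "Bootstrap")`, they can simply close over that parameter. A minimal sketch of the closure pattern, with a deliberately simplified `Bootstrap` and accessor (only the fields the diff actually touches are modeled):

```python
import threading

class Bootstrap:
    # Simplified stand-in: only the readiness event and instance slot used below.
    def __init__(self):
        self.hive_ready = threading.Event()
        self.hive_instance = None

def get_hive_instance(bootstrap_instance: "Bootstrap"):
    # Simplified accessor; the full version appears later in this commit.
    return bootstrap_instance.hive_instance if bootstrap_instance.hive_ready.is_set() else None

def launch_ui(bootstrap_instance: "Bootstrap"):
    def wait_for_voice_features():
        # Nested callbacks close over `bootstrap_instance`, so the accessor
        # receives the bootstrap state without any global lookup.
        return get_hive_instance(bootstrap_instance)
    return wait_for_voice_features

callback = launch_ui(Bootstrap())
print(callback())  # None until the bootstrap marks the full core ready
```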
@@ -1803,7 +1803,7 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
     demo.load(wait_for_voice_features, None, [voice_status_md, ptt_audio_in, ptt_transcript, ptt_transcribe_btn, ptt_chat_btn, vocal_chat_btn, enroll_audio, enroll_btn, who_btn, camera_status_md, video_out], show_progress="hidden")
     def stream_video():
         """Streams video frames from the VideoService to the UI."""
-        hive_instance = get_hive_instance()
+        hive_instance = get_hive_instance(bootstrap_instance)
         if not (
             hive_instance and not hive_instance.lite_mode and
             hasattr(hive_instance, 'video_service') and hive_instance.video_service and
@@ -1821,7 +1821,7 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
     demo.load(stream_video, None, video_out)
 
     def do_online_update():
-        hive_instance = get_hive_instance()
+        hive_instance = get_hive_instance(bootstrap_instance)
         if hive_instance.lite_mode: return "Online features are disabled in Lite Mode."  # type: ignore
         return "Added %s new summaries to curves." % (hive_instance.online_update().get("added",0))
 
@@ -1834,7 +1834,7 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
 
     # This function is now the core of the hands-free mode, using the new VADService.
     def process_vocal_chat_stream(stream, state, uid, role, mode, chatbot_history, request: gr.Request):  # type: ignore
-        now = time.time()
+        now = time.time()  # type: ignore
         hive_instance = get_hive_instance()  # type: ignore
        if hive_instance.lite_mode or not hasattr(hive_instance, 'vad_service') or not hive_instance.vad_service:  # type: ignore
             return None, state, chatbot_history, "VAD service not ready."
@@ -2097,6 +2097,20 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
         share=os.getenv("GRADIO_SHARE", "false").lower() == "true"
     ); return demo
 
+def get_hive_instance(bootstrap_instance: "Bootstrap"):
+    """
+    Global function to safely get the current Hive instance.
+    It prioritizes the full instance if ready, otherwise falls back to the lite one.
+    """
+    global HIVE_INSTANCE
+    if bootstrap_instance.hive_ready.is_set() and bootstrap_instance.hive_instance:
+        if HIVE_INSTANCE is None or HIVE_INSTANCE.lite_mode:
+            HIVE_INSTANCE = bootstrap_instance.hive_instance
+    elif HIVE_INSTANCE is None and bootstrap_instance.lite_core_ready.is_set():
+        HIVE_INSTANCE = bootstrap_instance.hive_lite_instance
+    return HIVE_INSTANCE
+
+
 class Bootstrap:
     """Handles the entire application startup sequence cleanly."""
     def __init__(self, config: Dict):
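The new module-level accessor caches the chosen core in the global `HIVE_INSTANCE`: once the full core is ready it replaces a missing or lite cached instance; otherwise the lite core is used as soon as it is available. The sketch below exercises that fallback order with simplified stand-ins for the bootstrap state (only the attributes named in the diff are modeled; everything else about `Bootstrap` is assumed):

```python
import threading

class FakeCore:
    def __init__(self, lite_mode: bool):
        self.lite_mode = lite_mode

class FakeBootstrap:
    """Only the attributes the accessor reads: two events and two instance slots."""
    def __init__(self):
        self.hive_ready = threading.Event()
        self.lite_core_ready = threading.Event()
        self.hive_instance = None
        self.hive_lite_instance = None

HIVE_INSTANCE = None

def get_hive_instance(bootstrap_instance: FakeBootstrap):
    global HIVE_INSTANCE
    if bootstrap_instance.hive_ready.is_set() and bootstrap_instance.hive_instance:
        if HIVE_INSTANCE is None or HIVE_INSTANCE.lite_mode:
            HIVE_INSTANCE = bootstrap_instance.hive_instance
    elif HIVE_INSTANCE is None and bootstrap_instance.lite_core_ready.is_set():
        HIVE_INSTANCE = bootstrap_instance.hive_lite_instance
    return HIVE_INSTANCE

boot = FakeBootstrap()
print(get_hive_instance(boot))            # None: nothing is ready yet

boot.hive_lite_instance = FakeCore(lite_mode=True)
boot.lite_core_ready.set()
print(get_hive_instance(boot).lite_mode)  # True: falls back to the lite core

boot.hive_instance = FakeCore(lite_mode=False)
boot.hive_ready.set()
print(get_hive_instance(boot).lite_mode)  # False: upgraded to the full core
```

Because the accessor mutates module state, any caller sees the upgrade from lite to full core as soon as the readiness event is set.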
@@ -2223,7 +2237,7 @@ class Bootstrap:
     def _init_full_core(self):
         """Initializes all features of the full Hive core."""
         logging.info("Initializing Full Hive Core...")  # Added logging
-        #
+        # This is now correctly calling the global get_hive_instance
         llm_thread = threading.Thread(target=lambda: get_hive_instance(lite=False, caps=self.caps), daemon=True)
         asr_thread = threading.Thread(target=get_asr, daemon=True)
         tts_thread = threading.Thread(target=lambda: get_tts(CFG["TTS_LANG"]), daemon=True)
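Note that the `lite=False, caps=self.caps` keywords suggest this lambda targets a loader/factory form of `get_hive_instance` rather than the single-argument accessor added at line 2100; the loader itself is outside this diff. The surrounding pattern is background warm-up: daemon threads load heavy components while readiness events gate the UI callbacks shown earlier (`voice_ready.wait()`, `hive_ready.wait()`). A minimal sketch of that pattern with placeholder loaders (all names below are illustrative, not the app's real functions):

```python
import threading
import time

hive_ready = threading.Event()
voice_ready = threading.Event()

def load_llm():
    time.sleep(0.1)  # placeholder for the slow model load

def load_asr():
    time.sleep(0.1)  # placeholder for the ASR model load

def init_full_core():
    llm_thread = threading.Thread(target=load_llm, daemon=True)
    asr_thread = threading.Thread(target=load_asr, daemon=True)
    llm_thread.start()
    asr_thread.start()

    def finish():
        # Flip the readiness events once the loads complete, so callbacks
        # blocked on .wait() (e.g. wait_for_voice_features above) can proceed.
        llm_thread.join()
        hive_ready.set()
        asr_thread.join()
        voice_ready.set()

    threading.Thread(target=finish, daemon=True).start()

init_full_core()
voice_ready.wait()
print("voice features enabled")
```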