Update app.py
Browse files
app.py
CHANGED
|
@@ -1646,51 +1646,48 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
|
|
| 1646 |
return "".join(ch for ch in text if unicodedata.category(ch)[0] != "C").strip()
|
| 1647 |
|
| 1648 |
def talk(m, uid, role, mode, hist, request: gr.Request): # type: ignore
|
| 1649 |
-
|
| 1650 |
session_id = request.session_hash
|
| 1651 |
# Use session_id for guests, uid for logged-in users
|
| 1652 |
current_user_id = uid or session_id
|
| 1653 |
|
| 1654 |
sanitized_m = _sanitize_input(m)
|
| 1655 |
if not sanitized_m:
|
| 1656 |
-
hist
|
| 1657 |
-
yield hist, gr.Textbox(value="")
|
| 1658 |
return
|
| 1659 |
|
| 1660 |
-
|
| 1661 |
-
|
| 1662 |
-
yield messages_hist, gr.Textbox(value="", interactive=False) # Show user message immediately, disable textbox
|
| 1663 |
|
| 1664 |
-
hive_instance = get_hive_instance() # type: ignore
|
| 1665 |
|
| 1666 |
if hive_instance.lite_mode:
|
| 1667 |
# Lite mode: direct, non-streaming response.
|
| 1668 |
-
reply = hive_instance.chat(sanitized_m,
|
| 1669 |
-
|
| 1670 |
-
yield
|
| 1671 |
else:
|
| 1672 |
# Full mode uses the DialogueManager for a streaming response.
|
| 1673 |
if not hasattr(hive_instance, 'dialogue_manager'):
|
| 1674 |
error_msg = "Dialogue Manager not available. Full core may still be initializing."
|
| 1675 |
-
|
| 1676 |
-
yield
|
| 1677 |
return
|
| 1678 |
-
|
| 1679 |
-
|
| 1680 |
try:
|
| 1681 |
-
|
|
|
|
| 1682 |
if chunk["type"] == "token":
|
| 1683 |
-
|
| 1684 |
-
|
| 1685 |
-
|
| 1686 |
-
|
| 1687 |
-
messages_hist[-1]["content"] = chunk["content"]
|
| 1688 |
-
yield messages_hist, gr.Textbox(value="", interactive=True)
|
| 1689 |
except Exception as e:
|
| 1690 |
error_msg = f"Error in DialogueManager: {e}"
|
| 1691 |
print(f"[ERROR] {error_msg}")
|
| 1692 |
-
|
| 1693 |
-
yield
|
| 1694 |
|
| 1695 |
msg.submit(talk, [msg, uid_state, role_state, mode_state, chatbot], [chatbot, msg], api_name="chat")
|
| 1696 |
|
|
@@ -1782,7 +1779,7 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
|
|
| 1782 |
"""Waits for ASR/TTS models and enables voice-related UI elements."""
|
| 1783 |
bootstrap_instance.voice_ready.wait()
|
| 1784 |
bootstrap_instance.hive_ready.wait() # Also wait for full core for voice features
|
| 1785 |
-
hive_instance = get_hive_instance(
|
| 1786 |
|
| 1787 |
voice_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'asr_service') and hasattr(hive_instance, 'tts_service')
|
| 1788 |
video_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'video_service') and CFG["VIDEO_ENABLED"] # type: ignore
|
|
@@ -2097,16 +2094,16 @@ def launch_ui(bootstrap_instance: "Bootstrap"):
|
|
| 2097 |
share=os.getenv("GRADIO_SHARE", "false").lower() == "true"
|
| 2098 |
); return demo
|
| 2099 |
|
| 2100 |
-
def get_hive_instance(bootstrap_instance: "Bootstrap"):
|
| 2101 |
"""
|
| 2102 |
Global function to safely get the current Hive instance.
|
| 2103 |
It prioritizes the full instance if ready, otherwise falls back to the lite one.
|
| 2104 |
"""
|
| 2105 |
global HIVE_INSTANCE
|
| 2106 |
-
if bootstrap_instance.hive_ready.is_set() and bootstrap_instance.hive_instance:
|
| 2107 |
if HIVE_INSTANCE is None or HIVE_INSTANCE.lite_mode:
|
| 2108 |
HIVE_INSTANCE = bootstrap_instance.hive_instance
|
| 2109 |
-
elif HIVE_INSTANCE is None and bootstrap_instance.lite_core_ready.is_set():
|
| 2110 |
HIVE_INSTANCE = bootstrap_instance.hive_lite_instance
|
| 2111 |
return HIVE_INSTANCE
|
| 2112 |
|
|
|
|
| 1646 |
return "".join(ch for ch in text if unicodedata.category(ch)[0] != "C").strip()
|
| 1647 |
|
| 1648 |
def talk(m, uid, role, mode, hist, request: gr.Request): # type: ignore
|
| 1649 |
+
effective_role = role if mode == "admin" else "user"
|
| 1650 |
session_id = request.session_hash
|
| 1651 |
# Use session_id for guests, uid for logged-in users
|
| 1652 |
current_user_id = uid or session_id
|
| 1653 |
|
| 1654 |
sanitized_m = _sanitize_input(m)
|
| 1655 |
if not sanitized_m:
|
| 1656 |
+
yield hist, gr.Textbox(value="")
|
|
|
|
| 1657 |
return
|
| 1658 |
|
| 1659 |
+
current_history = (hist or []) + [{"role": "user", "content": sanitized_m}]
|
| 1660 |
+
yield current_history, gr.Textbox(value="", interactive=False) # Show user message, disable textbox
|
|
|
|
| 1661 |
|
| 1662 |
+
hive_instance = get_hive_instance(bootstrap_instance) # type: ignore
|
| 1663 |
|
| 1664 |
if hive_instance.lite_mode:
|
| 1665 |
# Lite mode: direct, non-streaming response.
|
| 1666 |
+
reply = hive_instance.chat(sanitized_m, effective_role, current_user_id, max_new_tokens=256, temperature=0.7)
|
| 1667 |
+
current_history.append({"role": "assistant", "content": reply or "[No response from model]"})
|
| 1668 |
+
yield current_history, gr.Textbox(value="", interactive=True)
|
| 1669 |
else:
|
| 1670 |
# Full mode uses the DialogueManager for a streaming response.
|
| 1671 |
if not hasattr(hive_instance, 'dialogue_manager'):
|
| 1672 |
error_msg = "Dialogue Manager not available. Full core may still be initializing."
|
| 1673 |
+
current_history.append({"role": "assistant", "content": error_msg})
|
| 1674 |
+
yield current_history, gr.Textbox(value="", interactive=True)
|
| 1675 |
return
|
| 1676 |
+
|
| 1677 |
+
current_history.append({"role": "assistant", "content": ""})
|
| 1678 |
try:
|
| 1679 |
+
# The dialogue manager needs the full history to maintain context.
|
| 1680 |
+
for chunk in hive_instance.dialogue_manager.process_turn(current_history, current_user_id, effective_role, session_id):
|
| 1681 |
if chunk["type"] == "token":
|
| 1682 |
+
current_history[-1]["content"] += chunk["content"]
|
| 1683 |
+
yield current_history, gr.Textbox(value="", interactive=False)
|
| 1684 |
+
# After the stream is complete, re-enable the textbox.
|
| 1685 |
+
yield current_history, gr.Textbox(placeholder=f"Talk to {CFG['AGENT_NAME']}", interactive=True)
|
|
|
|
|
|
|
| 1686 |
except Exception as e:
|
| 1687 |
error_msg = f"Error in DialogueManager: {e}"
|
| 1688 |
print(f"[ERROR] {error_msg}")
|
| 1689 |
+
current_history[-1]["content"] = f"An error occurred: {error_msg}"
|
| 1690 |
+
yield current_history, gr.Textbox(value="", interactive=True)
|
| 1691 |
|
| 1692 |
msg.submit(talk, [msg, uid_state, role_state, mode_state, chatbot], [chatbot, msg], api_name="chat")
|
| 1693 |
|
|
|
|
| 1779 |
"""Waits for ASR/TTS models and enables voice-related UI elements."""
|
| 1780 |
bootstrap_instance.voice_ready.wait()
|
| 1781 |
bootstrap_instance.hive_ready.wait() # Also wait for full core for voice features
|
| 1782 |
+
hive_instance = get_hive_instance(bootstrap_instance)  # type: ignore
|
| 1783 |
|
| 1784 |
voice_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'asr_service') and hasattr(hive_instance, 'tts_service')
|
| 1785 |
video_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'video_service') and CFG["VIDEO_ENABLED"] # type: ignore
|
|
|
|
| 2094 |
share=os.getenv("GRADIO_SHARE", "false").lower() == "true"
|
| 2095 |
); return demo
|
| 2096 |
|
| 2097 |
+
def get_hive_instance(bootstrap_instance: "Bootstrap", lite: Optional[bool] = None, caps: Optional[Dict] = None):
|
| 2098 |
"""
|
| 2099 |
Global function to safely get the current Hive instance.
|
| 2100 |
It prioritizes the full instance if ready, otherwise falls back to the lite one.
|
| 2101 |
"""
|
| 2102 |
global HIVE_INSTANCE
|
| 2103 |
+
if bootstrap_instance and bootstrap_instance.hive_ready.is_set() and bootstrap_instance.hive_instance:
|
| 2104 |
if HIVE_INSTANCE is None or HIVE_INSTANCE.lite_mode:
|
| 2105 |
HIVE_INSTANCE = bootstrap_instance.hive_instance
|
| 2106 |
+
elif bootstrap_instance and HIVE_INSTANCE is None and bootstrap_instance.lite_core_ready.is_set():
|
| 2107 |
HIVE_INSTANCE = bootstrap_instance.hive_lite_instance
|
| 2108 |
return HIVE_INSTANCE
|
| 2109 |
|