Paulhayes committed (verified)
Commit be4c36b · 1 Parent(s): 104387c

Update app.py

Files changed (1): app.py (+646 −384)

app.py CHANGED
@@ -1458,7 +1458,6 @@ class LibrarianCurve:
 class LibrarianModule(LibrarianCurve, IModule):
     def __init__(self, hive_instance: "Hive"):
         IModule.__init__(self, hive_instance)
-        LibrarianCurve.__init__(self, hive_instance.store, hive_instance.k_store)
 
 class VoiceServicesModule(IModule):
     def __init__(self, hive_instance: "Hive"):
@@ -1478,6 +1477,8 @@ class VoiceServicesModule(IModule):
 # ----------- Hive core -----------
 # --- Memory & Manifest Helpers (auto-inserted) ---
 import tempfile, urllib.request, tarfile, zipfile
+from collections import OrderedDict
+from concurrent.futures import ThreadPoolExecutor
 import importlib
 from pathlib import Path as _Path
 
@@ -1527,6 +1528,42 @@ class PromptCompiler:
 
         return f"{head} {insight}\n\nUser: {final_instruction}\nAssistant:"
 
+class KnowledgeStoreModule(KnowledgeStore, IModule):
+    def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance); KnowledgeStore.__init__(self, hive_instance.config["HIVE_HOME"])
+    def start(self): pass
+    def stop(self): pass
+
+class CurveStoreModule(CurveStore, IModule):
+    def __init__(self, hive_instance: "Hive"):
+        IModule.__init__(self, hive_instance)
+        CurveStore.__init__(self, hive_instance.config["CURVE_DIR"])
+    def start(self): pass
+    def stop(self): pass
+
+class LibrarianModule(LibrarianCurve, IModule):
+    def __init__(self, hive_instance: "Hive"):
+        IModule.__init__(self, hive_instance)
+        LibrarianCurve.__init__(self, hive_instance.store, hive_instance.k_store)
+
+class EngineModule(EngineCurve, IModule):
+    def __init__(self, hive_instance: "Hive"):
+        IModule.__init__(self, hive_instance)
+        EngineCurve.__init__(self)
+    def start(self): pass
+    def stop(self): pass
+
+class OverlayModule(RuntimeOverlay, IModule):
+    def __init__(self, hive_instance: "Hive"):
+        IModule.__init__(self, hive_instance)
+        RuntimeOverlay.__init__(self)
+    def start(self): self.apply_to(self.hive)
+    def stop(self): pass
+
+class CompilerModule(PromptCompiler, IModule):
+    def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance); PromptCompiler.__init__(self); hive_instance.decoding_temperature=0.7
+    def start(self): pass
+    def stop(self): pass
+
 class Hive:
     def __init__(self, model_id: Optional[str]=None, device: Optional[str]=None, caps: Optional[Dict]=None, lite: bool = False):
         self.config = CFG
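All of the new *Module classes lean on the same small lifecycle contract from IModule and ModuleManager, neither of which appears in this diff. A minimal sketch of the assumed contract follows; the names match the diff, but the bodies are illustrative assumptions, not the repository's code:

    class IModule:
        def __init__(self, hive_instance):
            self.hive = hive_instance      # OverlayModule.start() reads self.hive
        def start(self): pass              # overridden per module
        def stop(self): pass

    class ModuleManager:
        def __init__(self):
            self._modules = {}
        def register(self, name, module):
            # assumed: modules also become reachable as hive.store, hive.k_store, ...
            self._modules[name] = module
        def start_all(self):
            for m in self._modules.values(): m.start()
        def stop_all(self):
            for m in self._modules.values(): m.stop()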
@@ -1534,23 +1571,7 @@ class Hive:
         self.lite_mode = lite
         self.module_manager = ModuleManager()
         Hive.bootstrap_instance = None # Class attribute to hold bootstrap instance
-
-        # Define Module classes here to resolve NameError during initialization
-        class KnowledgeStoreModule(KnowledgeStore, IModule):
-            def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance); KnowledgeStore.__init__(self, hive_instance.config["HIVE_HOME"])
-            def start(self): pass
-            def stop(self): pass
-        class CurveStoreModule(CurveStore, IModule):
-            def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance); CurveStore.__init__(self, hive_instance.config["CURVE_DIR"])
-            def start(self): pass
-            def stop(self): pass
-        class PersistenceEngine(IModule):
-            def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance)
-            def start(self): pass
-            def stop(self): pass
-        class VoiceServicesModule(IModule):
-            def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance)
-
+
         if not model_id:
             model_id, info = pick_model(self.caps)
             device = info.get("device", "cpu")
@@ -1569,21 +1590,29 @@ class Hive:
 
     def _init_full_mode(self):
         """Initializes the Hive in full-featured mode."""
-        print("[Hive] Initializing in Full Mode.")
-        # CRITICAL FIX: Initialize kstore first as other modules depend on it.
-        self.module_manager.register("kstore", KnowledgeStoreModule(self))
-        self.module_manager.register("store", CurveStoreModule(self))
-        self.module_manager.register("librarian", LibrarianModule(self))
-        self.module_manager.register("engine", EngineModule(self))
-        self.module_manager.register("overlay", OverlayModule(self))
-        self.module_manager.register("changes", ChangeManagerModule(self))
-        self.module_manager.register("compiler", CompilerModule(self))
-        self.module_manager.register("voice_video", VoiceServicesModule(self))
-        self.module_manager.register("persistence", PersistenceEngine(self))
-        self.dialogue_manager = DialogueManager(self) # type: ignore
+        print("[Hive] Initializing in Full Mode (parallel module loading).")
+
+        # Modules with dependencies are loaded first, sequentially.
+        self.module_manager.register("kstore", KnowledgeStoreModule(self))    # Depends on nothing
+        self.module_manager.register("store", CurveStoreModule(self))        # Depends on nothing
+        self.module_manager.register("librarian", LibrarianModule(self))     # Depends on kstore, store
+        self.module_manager.register("compiler", CompilerModule(self))       # Depends on nothing
+
+        # Independent modules can be loaded in parallel to speed up startup.
+        with ThreadPoolExecutor(max_workers=4) as executor:
+            futures = {
+                executor.submit(lambda: self.module_manager.register("engine", EngineModule(self))),
+                executor.submit(lambda: self.module_manager.register("overlay", OverlayModule(self))),
+                executor.submit(lambda: self.module_manager.register("changes", ChangeManagerModule(self))),
+                executor.submit(lambda: self.module_manager.register("voice_video", VoiceServicesModule(self))),
+                executor.submit(lambda: self.module_manager.register("persistence", PersistenceEngine(self))),
+                executor.submit(lambda: self.module_manager.register("selfopt", SelfOptimizerModule(self))),
+            }
+            for future in futures:
+                future.result()  # Wait for all parallel tasks to complete
 
+        self.dialogue_manager = DialogueManager(self)
         self._setup_llm_pipeline()
-
         self.module_manager.start_all()
 
     def _load_local_model(self, trust: bool, **kwargs):
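The parallel block above is the standard concurrent.futures recipe: submit zero-argument callables, then call result() on each future so an exception raised in a worker thread resurfaces before start_all() runs. A self-contained sketch of the same pattern, with placeholder tasks standing in for the real modules:

    from concurrent.futures import ThreadPoolExecutor

    def make_task(name):
        return lambda: print(f"registered {name}")   # stands in for EngineModule(self) etc.

    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(make_task(n)) for n in ("engine", "overlay", "persistence")]
        for f in futures:
            f.result()   # blocks until done; re-raises any worker-thread exception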
@@ -1776,366 +1805,595 @@ class Hive:
             yield "[Error: Local model is not available]"
             return
         from transformers import TextIteratorStreamer
-
-        # Create streamer and prepare inputs correctly on the target device
         streamer = TextIteratorStreamer(self.tok, skip_prompt=True, skip_special_tokens=True)
-
-        inputs = self.tok([prompt], return_tensors="pt")
-        try:
-            # Move each tensor to the configured device (cpu/cuda)
-            device = torch.device(self.device) if (torch is not None) else None
-            if device is not None:
-                inputs = {k: v.to(device) for k, v in inputs.items()}
-        except Exception:
-            # Best-effort: keep inputs as-is if device move fails
-            pass
-
-        # Build explicit kwargs for model.generate (use input_ids / attention_mask)
-        gen_kwargs = {
-            "input_ids": inputs.get("input_ids"),
-            "attention_mask": inputs.get("attention_mask", None),
-            "streamer": streamer,
-            "max_new_tokens": int(max_new_tokens),
-            "do_sample": True,
-            "temperature": float(temperature),
-            "stopping_criteria": self.stopping_criteria
-        }
-
-        def _run_generate():
-            # Filter out None values to avoid passing unexpected kwarg values
-            safe_kwargs = {k: v for k, v in gen_kwargs.items() if v is not None}
-            try:
-                self.model.generate(**safe_kwargs)
-            except Exception as e:
-                print(f"[ModelBridge] local generate failed: {e}")
-
-        thread = threading.Thread(target=_run_generate, daemon=True)
+        inputs = self.tok([prompt], return_tensors="pt").to(self.device)
+        generation_kwargs = dict(
+            inputs,
+            streamer=streamer,
+            max_new_tokens=max_new_tokens,
+            do_sample=True,
+            temperature=temperature,
+            stopping_criteria=self.stopping_criteria
+        )
+        thread = threading.Thread(target=self.model.generate, kwargs=generation_kwargs)
         thread.start()
-
         for new_text in streamer:
             yield new_text # type: ignore
 
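The rewritten body is the standard TextIteratorStreamer recipe from transformers: generate() blocks, so it runs on a worker thread while the caller iterates decoded fragments as they arrive. A minimal standalone version of the same recipe (the "gpt2" checkpoint is a placeholder, not this app's model):

    import threading
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tok(["Hello there"], return_tensors="pt")
    streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so run it on a worker thread and consume tokens here
    thread = threading.Thread(target=model.generate,
                              kwargs=dict(inputs, streamer=streamer, max_new_tokens=20))
    thread.start()
    for piece in streamer:          # yields text as soon as tokens are decoded
        print(piece, end="", flush=True)
    thread.join()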
-    def chat(self, message: str, effective_role: str = "user", caller_id: Optional[str] = None, max_new_tokens: int = 128, temperature: float = 0.7) -> str:
-        """Synchronous convenience method for getting a reply for short interactions.
-
-        - In lite mode this uses the configured `pipe` for a single-shot generation.
-        - In full mode it prefers the DialogueManager streaming pipeline and blocks until
-          a final reply is produced.
-        """
-        try:
-            final_message, intent = self._prepare_chat_input(message, "en", False, None)
-            snippets = self._get_retrieval_context(message, effective_role, caller_id, getattr(self, 'retrieval_k', 6))
-            prompt = self.compiler.compile(final_message, snippets, intent=intent)
-
-            # Lite/local single-shot path
-            if self.lite_mode or not hasattr(self, 'dialogue_manager'):
-                try:
-                    out = self.pipe(prompt, max_new_tokens=max_new_tokens, do_sample=False, temperature=temperature)
-                    if isinstance(out, list) and out:
-                        return out[0].get("generated_text", str(out[0]))
-                    if isinstance(out, str):
-                        return out
-                    return str(out)
-                except Exception as e:
-                    return f"[Error: generation failed: {e}]"
-
-            # Full mode: prefer DialogueManager streaming API but block until final
-            try:
-                full_reply = ""
-                for chunk in self.dialogue_manager.process_turn(message, caller_id or "guest", effective_role, "cli"):
-                    if chunk.get("type") == "token":
-                        full_reply += chunk.get("content", "")
-                    elif chunk.get("type") == "final":
-                        full_reply += chunk.get("content", "")
-                        break
-                return full_reply
-            except Exception:
-                # Fallback to single-shot pipe if streaming fails
-                try:
-                    out = self.pipe(prompt, max_new_tokens=max_new_tokens, do_sample=False, temperature=temperature)
-                    if isinstance(out, list) and out:
-                        return out[0].get("generated_text", str(out[0]))
-                    return str(out)
-                except Exception as e:
-                    return f"[Error: chat failed: {e}]"
-        except Exception as e:
-            return f"[Error preparing chat: {e}]"
-
+    def chat(self, message:str, effective_role:str, caller_id: Optional[str],
+             k:int=None, max_new_tokens:int=256, temperature:float=None, prompt_override: Optional[str] = None) -> str: # type: ignore
+        temp = temperature if temperature is not None else (self.decoding_temperature if not self.lite_mode else 0.7)
+
+        user_obj, _ = _find_user(_load_users(), caller_id)
+        user_prefs = (user_obj.get("prefs", {}) or {}) if user_obj else {}
+        user_lang = user_prefs.get("language", "en")
+        phonics_on = user_prefs.get("phonics_on", False)
+
+        final_message, intent = self._prepare_chat_input(message, user_lang, phonics_on, prompt_override) # type: ignore
+
+        if self.lite_mode:
+            prompt = f"User: {final_message}\nAssistant:"
+            full_reply = "".join(list(self.chat_stream(prompt, max_new_tokens, temp)))
+            return full_reply.rsplit("Assistant:", 1)[-1].strip()
+
+        kk = k if k is not None else (self.retrieval_k if hasattr(self, 'retrieval_k') else 6)
+        snippets = self._get_retrieval_context(message, effective_role, caller_id, kk) # type: ignore
+
+        prompt = self.compiler.compile( # type: ignore
+            final_message,
+            snippets,
+            token_budget=int(CFG["CTX_TOKENS"]),
+            intent=intent,
+            user_lang=user_lang
+        )
+
+        full_output = "".join(list(self.chat_stream(prompt, max_new_tokens, temp))) # type: ignore
+        self.engine.run(message, snippets)
+
+        return self._postprocess_and_log(full_output, message, effective_role, caller_id, intent, snippets)
+
-# ----------- coverage heuristic -----------
-def coverage_score_from_snippets(snippets: list, scores: list) -> float:
-    if not snippets or not scores: return 0.0
-    s = sorted(scores, reverse=True)[:3]
-    base = sum(s) / len(s) if s else 0.0 # type: ignore
-    bonus = min(0.15, 0.03 * len(snippets))
-    return float(max(0.0, min(1.0, base + bonus)))
-
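The removed heuristic is easy to check by hand: for scores [0.8, 0.6, 0.5, 0.2] the top-3 mean is 1.9/3 ≈ 0.633, the snippet bonus is min(0.15, 0.03 × 4) = 0.12, and the clamped result is ≈ 0.753. As a standalone sanity check:

    def coverage_score_from_snippets(snippets: list, scores: list) -> float:
        # Mean of the top-3 scores plus a small per-snippet bonus, clamped to [0, 1]
        if not snippets or not scores: return 0.0
        s = sorted(scores, reverse=True)[:3]
        base = sum(s) / len(s)
        bonus = min(0.15, 0.03 * len(snippets))
        return float(max(0.0, min(1.0, base + bonus)))

    assert abs(coverage_score_from_snippets(["a", "b", "c", "d"], [0.8, 0.6, 0.5, 0.2]) - 0.7533) < 1e-3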
-# ----------- RBAC / users / lockouts (Restored) -----------
-USERS_DB=os.path.join(CFG["STATE_DIR"],"users.json")
-LOCKS_DB=os.path.join(CFG["STATE_DIR"],"lockouts.json")
-VOICES_DB=os.path.join(CFG["STATE_DIR"],"voices.json")
-ADAPT_DB=os.path.join(CFG["STATE_DIR"],"speech_adapt.json")
-
-def _init_users():
-    d={"owner":{"id":"owner:1","name":CFG["OWNER_NAME"],"role":"owner","pass":CFG["OWNER_PASS"],"second":CFG["OWNER_SECOND"],"prefs":{"activation_names":[CFG["AGENT_NAME"]],"language":"en"}},
-       "admins_super":[],"admins_general":[],"users":[]}
-    _save_json(USERS_DB,d); return d
-def _load_users():
-    d=_load_json(USERS_DB, None); return d if d else _init_users()
-def _find_user(d, name_or_id):
-    pools=[("owner",[d.get("owner")]),("admin_super",d.get("admins_super", [])),("admin_general",d.get("admins_general", [])),("user",d.get("users", []))]
-    for role,pool in pools:
-        for u in pool or []:
-            if u and (u.get("id")==name_or_id or u.get("name")==name_or_id): return u, role
-    return None, None
-
-PERMS={
-    "owner":{"can_add":["admin_super","admin_general","user"],"can_remove":["admin_super","admin_general","user"],
-             "can_edit_role_of":["admin_super","admin_general","user"],"can_edit_profile_of":["owner","admin_super","admin_general","user"],
-             "can_view_scopes":"all","maintenance":"full","code_edit":"approve_and_edit"},
-    "admin_super":{"can_add":["admin_general","user"],"can_remove":["admin_general","user"],
-                   "can_edit_role_of":["admin_general","user"],"can_edit_profile_of":["admin_general","user"],
-                   "can_view_scopes":"self_only","maintenance":"advanced","code_edit":"suggest_only"},
-    "admin_general":{"can_add":["user"],"can_remove":["user"],"can_edit_role_of":["user"],"can_edit_profile_of":["user"],
-                     "can_view_scopes":"self_only","maintenance":"basic","code_edit":"suggest_only"},
-    "user":{"can_add":[],"can_remove":[],"can_edit_role_of":[],"can_edit_profile_of":["user"],
-            "can_view_scopes":"self_only","maintenance":"none","code_edit":"none"},
-    "guest":{"can_add":[],"can_remove":[],"can_edit_role_of":[],"can_edit_profile_of":[],
-             "can_view_scopes":"self_only","maintenance":"none","code_edit":"none"},
-}
-
-def attempt_login(name_or_id:str, password:str="", second:Optional[str]=None):
-    d=_load_users(); locks=_load_json(LOCKS_DB,{ })
-    def lock_fail(lid, msg):
-        st=locks.get(lid, {"fails":0,"until":0}); st["fails"]=st.get("fails",0)+1; dur=180 if st["fails"]>=3 else 0; st["until"]=time.time()+dur if dur else 0
-        locks[lid]=st; _save_json(LOCKS_DB,locks); return False, msg
-    u,_=_find_user(d, name_or_id)
-    if not u: return False, "Profile not found."
-    role=u.get("role","user"); lid=str(u.get("id", u.get("name"))); now=time.time(); st=locks.get(lid, {"fails":0,"until":0})
-    if now < st.get("until",0): return False, f"Locked; try again in ~{int(st['until']-now)}s."
-    if role in ("admin_general","admin_super","owner") and (password!=u.get("pass") or (role=="owner" and u.get("second") and second!=u.get("second"))): return lock_fail(lid, "Credentials incorrect.")
-    locks[lid]={"fails":0,"until":0}; _save_json(LOCKS_DB,locks); return True, f"Welcome, {u.get('name')} ({role})."
 
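The removed attempt_login implements a simple counter-based lockout: every bad credential bumps a per-user fail count, and from the third failure on the profile is locked for 180 seconds. A trimmed sketch of just that bookkeeping (an in-memory dict stands in for the lockouts.json store):

    import time

    LOCKS = {}  # stand-in for the persisted lockouts.json

    def lock_fail(lid: str, msg: str):
        st = LOCKS.get(lid, {"fails": 0, "until": 0})
        st["fails"] += 1
        st["until"] = time.time() + 180 if st["fails"] >= 3 else 0  # 180 s from the 3rd failure
        LOCKS[lid] = st
        return False, msg

    lock_fail("owner:1", "bad"); lock_fail("owner:1", "bad")
    ok, msg = lock_fail("owner:1", "bad")          # third failure -> locked for ~180 s
    assert not ok and time.time() < LOCKS["owner:1"]["until"]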
-# ----------- overlay / hotpatch -----------
-RUNTIME_OVERRIDES = os.path.join(HIVE_HOME, "system", "runtime_overrides.json")
-ALLOWED_PATCH_KEYS={"prompt_head","retrieval_k","token_budget","temperature","router_rules","web_threshold"}
-def _load_overrides():
-    if os.path.exists(RUNTIME_OVERRIDES):
-        try: return json.load(open(RUNTIME_OVERRIDES,"r",encoding="utf-8"))
-        except Exception: return {}
-    return {}
-def _save_overrides(ovr:dict):
-    _atomic_write_json(RUNTIME_OVERRIDES, ovr)
-
-class RuntimeOverlay:
-    def __init__(self): self.ovr=_load_overrides()
-    def apply_to(self, hive: "Hive"):
-        o=self.ovr or {}
-        if isinstance(o.get("prompt_head"),str): hive.compiler.override_head=o["prompt_head"]
-        if isinstance(o.get("token_budget"),int): hive.compiler.override_budget=max(256, min(8192, o["token_budget"]))
-        hive.retrieval_k=int(o.get("retrieval_k",6)); hive.retrieval_k=max(3,min(24,hive.retrieval_k))
-        hive.decoding_temperature=float(o.get("temperature",0.7)); hive.decoding_temperature=max(0.0,min(1.5,hive.decoding_temperature))
-        rr=o.get("router_rules") or []
-        if isinstance(rr,list):
-            try: hive.engine.router_rules=[re.compile(pat,re.I) for pat in rr if isinstance(pat,str) and pat]
-            except re.error: hive.engine.router_rules=[]
-        t=o.get("web_threshold",None); hive.web_threshold=float(t) if isinstance(t,(int,float)) else 0.40
-    def patch(self, patch:dict, actor_role:str="hive")->Tuple[bool,str]:
-        if not CFG["ALLOW_RUNTIME_HOTPATCH"]: return False,"Runtime hotpatch disabled."
-        if actor_role not in ("hive","admin_general","admin_super","owner"): return False,"Unauthorized actor."
-        for k in list(patch.keys()):
-            if k not in ALLOWED_PATCH_KEYS: patch.pop(k,None)
-        if not patch: return False,"No allowed keys."
 
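RuntimeOverlay whitelists and clamps every hot-patched knob (retrieval_k to 3..24, temperature to 0.0..1.5, token_budget to 256..8192), so a malformed patch degrades to safe values rather than breaking the runtime. An isolated illustration of that filter-then-clamp idea (a dict comprehension here instead of the in-place pop):

    ALLOWED_PATCH_KEYS = {"prompt_head", "retrieval_k", "token_budget",
                          "temperature", "router_rules", "web_threshold"}

    patch = {"temperature": 9.9, "retrieval_k": 100, "evil_key": "x"}
    patch = {k: v for k, v in patch.items() if k in ALLOWED_PATCH_KEYS}  # drops evil_key

    temperature = max(0.0, min(1.5, float(patch.get("temperature", 0.7))))  # -> 1.5
    retrieval_k = max(3, min(24, int(patch.get("retrieval_k", 6))))         # -> 24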
-# The Gradio UI launcher was accidentally inlined inside RuntimeOverlay.patch.
-# Move it to a dedicated top-level function so the class method stays clean.
-
-def launch_ui(bootstrap_instance):
-    demo = gr.Blocks()
-    with demo:
-        with gr.Column(scale=1, min_width=300):
-            with gr.Sidebar():
-                uid_state = gr.State(None)
-                role_state = gr.State("guest")
-                mode_state = gr.State("user")
-                phonics_state = gr.State(False) # type: ignore
-
-                with gr.Accordion("Login & Profile", open=True):
-                    login_name = gr.Textbox(label="Name or ID")
-                    login_pass = gr.Textbox(label="Password (if required)", type="password")
-                    login_second = gr.Textbox(label="Second (owner only)", type="password")
-                    login_btn = gr.Button("Login")
-                    login_status = gr.Markdown(elem_id="login_status") # type: ignore
-                    profile_status = gr.Markdown("Login to see your profile.")
-                    profile_save_btn = gr.Button("Save Profile")
-
-                with gr.Accordion("🌐 Language Preference", open=False):
-                    profile_lang = gr.Dropdown(choices=["en", "es", "fr", "de", "zh"], label="Preferred Language", value="en")
-
-                with gr.Accordion("🗣️ Phonics Assist", open=False):
-                    gr.Markdown("Enable to get phonetic hints for English words when using the 'pronounce' command.")
-                    profile_phonics = gr.Checkbox(label="Enable Phonics Assist (for English)")
-
-                with gr.Accordion("🧠 Memory & Vocabulary", open=False):
-                    summary_output = gr.Markdown("Initializing... (Full core required, est. 1-2 min)")
-                    summary_btn = gr.Button("Show Memory Summary", interactive=False)
-                    vocab_output = gr.Markdown("---")
-                    vocab_btn = gr.Button("Get New Word", interactive=False)
-                    progress_output = gr.Markdown("---")
-
-                with gr.Accordion("🗣️ Voice & Hands-Free", open=False, visible=True) as voice_accordion:
-                    voice_status_md = gr.Markdown("Initializing voice models... (Est. 30-60 sec)")
-                    with gr.Tabs() as voice_tabs:
-                        with gr.TabItem("Push-to-Talk"):
-                            ptt_audio_in = gr.Audio(sources=["microphone"], type="filepath", label="1. Record your message", interactive=False)
-                            ptt_transcript = gr.Textbox(label="2. Transcript / Your Message", interactive=False)
-                            with gr.Row():
-                                ptt_transcribe_btn = gr.Button("Transcribe Only", interactive=False)
-                                ptt_chat_btn = gr.Button("Send to Chat & Get Voice Reply", variant="primary", interactive=False)
-                            ptt_reply_audio = gr.Audio(type="filepath", label="3. Assistant's Voice Reply", autoplay=True)
-                        with gr.TabItem("Hands-Free"):
-                            vocal_chat_state = gr.State({"active": False, "audio_buffer": b'', "last_interaction_time": 0, "conversation_timeout": 10.0})
-                            vocal_chat_btn = gr.Button("Start Hands-Free Conversation", interactive=False)
-                            vocal_chat_status = gr.Markdown("Status: Inactive")
-                            vocal_mic = gr.Audio(sources=["microphone"], streaming=True, visible=False, autoplay=True)
-                            wake_word_mic = gr.Audio(sources=["microphone"], streaming=True, visible=False, autoplay=False, elem_id="wake_word_mic")
-                            wake_word_state = gr.State({"buffer": b""})
-                        with gr.TabItem("Voice Login"):
-                            gr.Markdown("Enroll your voice to enable password-free login for user accounts.")
-                            enroll_audio = gr.Audio(sources=["microphone"], type="filepath", label="Record 5-10s for voiceprint", interactive=False)
-                            with gr.Row():
-                                enroll_btn = gr.Button("Enroll Voice for Current User", interactive=False)
-                                enroll_status = gr.Markdown()
-                            gr.Markdown("---")
-                            gr.Markdown("After enrolling, you can log in by recording your voice here.")
-                            with gr.Row():
-                                who_btn = gr.Button("Login by Voice", interactive=False)
-                                who_status = gr.Markdown()
-
-                with gr.Accordion("📸 Camera", open=False, visible=True) as camera_accordion:
-                    camera_status_md = gr.Markdown("Camera feature disabled or initializing...")
-                    video_out = gr.Image(label="Camera", type="pil", interactive=False)
-
-                with gr.Accordion("🌐 Network", open=False, visible=True) as network_accordion:
-                    network_status_md = gr.Markdown("Initializing network features...")
-                    wifi_status = gr.Markdown("Wi-Fi: checking...")
-                    connect_now = gr.Button("Try auto-connect now (non-blocking)")
-                    online_now = gr.Button("Fetch updates now", interactive=False)
-                    online_status = gr.Markdown()
-
-                with gr.Accordion("⚙️ Admin Console", open=False, visible=True) as admin_accordion:
-                    admin_info = gr.Markdown("Login as an admin and switch to Admin mode to use these tools.")
-                    mode_picker = gr.Radio(choices=["user", "admin"], value="user", label="Mode (admins only)")
-                    with gr.Tabs() as admin_tabs:
-                        with gr.TabItem("User Management"):
-                            target = gr.Textbox(label="Target name or id")
-                            new_name = gr.Textbox(label="New name")
-                            rename_btn = gr.Button("Rename")
-                            new_pass = gr.Textbox(label="New password")
-                            pass_btn = gr.Button("Change password")
-                            new_role = gr.Dropdown(choices=["owner", "admin_super", "admin_general", "user"], value="user", label="New role")
-                            role_btn = gr.Button("Change role", elem_id="role_btn")
-                            out = gr.Markdown()
-                        with gr.TabItem("Add User"):
-                            add_name = gr.Textbox(label="Add: name")
-                            add_role = gr.Dropdown(choices=["admin_super", "admin_general", "user"], value="user", label="Add role")
-                            add_pass = gr.Textbox(label="Add password (admins only)")
-                            add_btn = gr.Button("Add user/admin")
-                            out_add = gr.Markdown()
-                        with gr.TabItem("System"):
-                            ingest_status = gr.Markdown("Memory Ingestion: Idle")
-                            ingest_now_btn = gr.Button("Start Background Ingestion", interactive=False)
-                            mem_compress_btn = gr.Button("Compress Memory (archive)", interactive=False)
-                            compress_status = gr.Markdown("")
-                            hotpatch_patch = gr.Code(label="Paste hotpatch JSON (advanced)")
-                            hotpatch_status = gr.Markdown("Awaiting patch")
-                            hotpatch_apply = gr.Button("Apply Hotpatch", elem_id="hotpatch_apply", interactive=False)
-                        with gr.TabItem("Optimization"):
-                            gr.Markdown("### Internal Optimization (Change Manager)")
-                            prop_kind = gr.Dropdown(choices=["model", "package", "code"], value="model", label="Proposal type")
-                            prop_name = gr.Textbox(label="Model ID / Package Name")
-                            prop_ver = gr.Textbox(label="Package version (optional)")
-                            prop_reason = gr.Textbox(label="Why this change?")
-                            prop_patch = gr.Code(label="Code patch (for 'code' proposals): paste full replacement or diff")
-                            propose_btn = gr.Button("Propose", interactive=False)
-                            test_btn = gr.Button("Test in sandbox", interactive=False)
-                            apply_btn = gr.Button("Apply (policy-checked)", elem_id="apply_btn", interactive=False)
-                            opt_out = gr.JSON(label="Result")
-
-        # --- Event Handlers ---
-        def get_hive_instance():
-            """Return the full hive instance if ready, otherwise fall back to the lite instance.
-
-            This helper is used by UI handlers to obtain a usable Hive object without
-            relying on a global name. `bootstrap_instance` is captured from the
-            surrounding `launch_ui` scope.
-            """
-            try:
-                if getattr(bootstrap_instance, 'hive_instance', None):
-                    return bootstrap_instance.hive_instance
-            except Exception:
-                pass
-            try:
-                return bootstrap_instance.hive_lite_instance
-            except Exception:
-                return None
+# --------------- UI ---------------
+HELP=f"""
+**Admin/User mode**: Admins (general/super) and Owner log in with password (Owner also needs second factor). After login choose Admin or User mode.
+**Owner-only code edits** are enforced via Change Manager policy. Hive can sandbox, test, and propose; code writes require Owner approval (`OPT_AUTO_APPLY=1`) unless Owner applies manually.
+**Offline/Online**: Works fully offline from curves. If online and enabled, fetches RSS/web snippets ➡️ summarizes locally ➡️ saves to curves (persists offline).
+**Voice**: Faster-Whisper ASR (auto language), Piper TTS mixed-language, phonics hints (English).
+**Privacy**: Sensitive/first-person inputs route to user-private library; neutral info to general.
+"""
+
+def launch_ui(bootstrap_instance: "Bootstrap"):
+    HIVE_INSTANCE: Optional[Hive] = None
+    def get_hive_instance():
+        nonlocal HIVE_INSTANCE
+
+        # If the full hive is ready, ensure we are using it.
+        if bootstrap_instance.hive_ready.is_set():
+            if HIVE_INSTANCE is None or HIVE_INSTANCE.lite_mode:
+                HIVE_INSTANCE = bootstrap_instance.hive_instance
+                print("[UI] Full Hive instance attached.")
+            return HIVE_INSTANCE
+
+        # Otherwise, use the lite instance.
+        if HIVE_INSTANCE is None:
+            HIVE_INSTANCE = bootstrap_instance.hive_lite_instance
+            print("[UI] Using Lite Hive instance while full core initializes.")
+        return HIVE_INSTANCE
+
+    with gr.Blocks(title="Hive 🐝") as demo:
+        with gr.Row():
+            with gr.Column(scale=3):
+                gr.Markdown(f"## {CFG['AGENT_NAME']} 🐝")
+                core_status = gr.Markdown("⏳ **Initializing Full Hive Core...** You can chat with the Lite model now. Advanced features will be enabled shortly.") # type: ignore
+                chatbot = gr.Chatbot(height=600, type="messages", label="Chat")
+                msg = gr.Textbox(placeholder=f"Talk to {CFG['AGENT_NAME']} (Lite Mode)", interactive=True, show_label=False, container=False, scale=4)
+
+            with gr.Column(scale=1, min_width=300):
+                with gr.Sidebar():
+                    uid_state=gr.State(None); role_state=gr.State("guest"); mode_state=gr.State("user"); phonics_state=gr.State(False) # type: ignore
+
+                    with gr.Accordion("Login & Profile", open=True):
+                        login_name=gr.Textbox(label="Name or ID")
+                        login_pass=gr.Textbox(label="Password (if required)", type="password")
+                        login_second=gr.Textbox(label="Second (owner only)", type="password")
+                        login_btn=gr.Button("Login")
+                        login_status=gr.Markdown(elem_id="login_status") # type: ignore
+                        profile_status = gr.Markdown("Login to see your profile.")
+                        profile_save_btn = gr.Button("Save Profile")
+
+                    with gr.Accordion("🌐 Language Preference", open=False):
+                        profile_lang = gr.Dropdown(choices=["en","es","fr","de","zh"], label="Preferred Language", value="en")
+
+                    with gr.Accordion("🗣️ Phonics Assist", open=False):
+                        gr.Markdown("Enable to get phonetic hints for English words when using the 'pronounce' command.")
+                        profile_phonics = gr.Checkbox(label="Enable Phonics Assist (for English)")
+
+                    with gr.Accordion("🧠 Memory & Vocabulary", open=False):
+                        summary_output = gr.Markdown("Initializing... (Full core required, est. 1-2 min)")
+                        summary_btn = gr.Button("Show Memory Summary", interactive=False)
+                        vocab_output = gr.Markdown("---")
+                        vocab_btn = gr.Button("Get New Word", interactive=False)
+                        progress_output = gr.Markdown("---")
+
+                    with gr.Accordion("🗣️ Voice & Hands-Free", open=False, visible=True) as voice_accordion:
+                        voice_status_md = gr.Markdown("Initializing voice models... (Est. 30-60 sec)")
+                        with gr.Tabs() as voice_tabs:
+                            with gr.TabItem("Push-to-Talk"):
+                                ptt_audio_in = gr.Audio(sources=["microphone"], type="filepath", label="1. Record your message", interactive=False)
+                                ptt_transcript = gr.Textbox(label="2. Transcript / Your Message", interactive=False)
+                                with gr.Row():
+                                    ptt_transcribe_btn = gr.Button("Transcribe Only", interactive=False)
+                                    ptt_chat_btn = gr.Button("Send to Chat & Get Voice Reply", variant="primary", interactive=False)
+                                ptt_reply_audio = gr.Audio(type="filepath", label="3. Assistant's Voice Reply", autoplay=True)
+                            with gr.TabItem("Hands-Free"):
+                                vocal_chat_state = gr.State({"active": False, "audio_buffer": b'', "last_interaction_time": 0, "conversation_timeout": 10.0})
+                                vocal_chat_btn = gr.Button("Start Hands-Free Conversation", interactive=False)
+                                vocal_chat_status = gr.Markdown("Status: Inactive")
+                                vocal_mic = gr.Audio(sources=["microphone"], streaming=True, visible=False, autoplay=True)
+                                wake_word_mic = gr.Audio(sources=["microphone"], streaming=True, visible=False, autoplay=False, elem_id="wake_word_mic")
+                                wake_word_state = gr.State({"buffer": b""})
+                            with gr.TabItem("Voice Login"):
+                                gr.Markdown("Enroll your voice to enable password-free login for user accounts.")
+                                enroll_audio = gr.Audio(sources=["microphone"], type="filepath", label="Record 5-10s for voiceprint", interactive=False)
+                                with gr.Row():
+                                    enroll_btn = gr.Button("Enroll Voice for Current User", interactive=False)
+                                    enroll_status = gr.Markdown()
+                                gr.Markdown("---")
+                                gr.Markdown("After enrolling, you can log in by recording your voice here.")
+                                with gr.Row():
+                                    who_btn = gr.Button("Login by Voice", interactive=False)
+                                    who_status = gr.Markdown()
+
+                    with gr.Accordion("📸 Camera", open=False, visible=True) as camera_accordion:
+                        camera_status_md = gr.Markdown("Camera feature disabled or initializing...")
+                        video_out = gr.Image(label="Camera", type="pil", interactive=False)
+
+                    with gr.Accordion("🌐 Network", open=False, visible=True) as network_accordion:
+                        network_status_md = gr.Markdown("Initializing network features...")
+                        wifi_status=gr.Markdown("Wi-Fi: checking...")
+                        connect_now=gr.Button("Try auto-connect now (non-blocking)")
+                        online_now=gr.Button("Fetch updates now", interactive=False)
+                        online_status=gr.Markdown()
+
+                    with gr.Accordion("⚙️ Admin Console", open=False, visible=True) as admin_accordion:
+                        admin_info=gr.Markdown("Login as an admin and switch to Admin mode to use these tools.")
+                        mode_picker=gr.Radio(choices=["user","admin"], value="user", label="Mode (admins only)")
+                        with gr.Tabs() as admin_tabs:
+                            with gr.TabItem("User Management"):
+                                target=gr.Textbox(label="Target name or id")
+                                new_name=gr.Textbox(label="New name")
+                                rename_btn=gr.Button("Rename")
+                                new_pass=gr.Textbox(label="New password")
+                                pass_btn=gr.Button("Change password")
+                                new_role=gr.Dropdown(choices=["owner","admin_super","admin_general","user"], value="user", label="New role")
+                                role_btn=gr.Button("Change role", elem_id="role_btn")
+                                out=gr.Markdown()
+                            with gr.TabItem("Add User"):
+                                add_name=gr.Textbox(label="Add: name")
+                                add_role=gr.Dropdown(choices=["admin_super","admin_general","user"], value="user", label="Add role")
+                                add_pass=gr.Textbox(label="Add password (admins only)")
+                                add_btn=gr.Button("Add user/admin")
+                                out_add=gr.Markdown()
+                            with gr.TabItem("System"):
+                                ingest_status = gr.Markdown("Memory Ingestion: Idle")
+                                ingest_now_btn = gr.Button("Start Background Ingestion", interactive=False)
+                                mem_compress_btn=gr.Button("Compress Memory (archive)", interactive=False)
+                                compress_status=gr.Markdown("")
+                                hotpatch_patch=gr.Code(label="Paste hotpatch JSON (advanced)")
+                                hotpatch_status=gr.Markdown("Awaiting patch")
+                                hotpatch_apply=gr.Button("Apply Hotpatch", elem_id="hotpatch_apply", interactive=False)
+                            with gr.TabItem("Optimization"):
+                                gr.Markdown("### Internal Optimization (Change Manager)")
+                                prop_kind=gr.Dropdown(choices=["model","package","code"], value="model", label="Proposal type")
+                                prop_name=gr.Textbox(label="Model ID / Package Name")
+                                prop_ver=gr.Textbox(label="Package version (optional)")
+                                prop_reason=gr.Textbox(label="Why this change?")
+                                prop_patch=gr.Code(label="Code patch (for 'code' proposals): paste full replacement or diff")
+                                propose_btn=gr.Button("Propose", interactive=False)
+                                test_btn=gr.Button("Test in sandbox", interactive=False)
+                                apply_btn=gr.Button("Apply (policy-checked)", elem_id="apply_btn", interactive=False)
+                                opt_out=gr.JSON(label="Result")
+
+        # --- Event Handlers ---
 
         def _sanitize_input(text: str) -> str:
             """Removes control characters and leading/trailing whitespace."""
-            if not text:
-                return ""
+            if not text: return ""
             return "".join(ch for ch in text if unicodedata.category(ch)[0] != "C").strip()
-
-        def talk(m, uid, role, mode, hist, request: gr.Request): # type: ignore
+
+        def talk(m, uid, role, mode, hist, request: gr.Request): # type: ignore
             eff = role if mode == "admin" else "user"
             session_id = request.session_hash
             # Use session_id for guests, uid for logged-in users # type: ignore
             current_user_id = uid or session_id
-
+
             sanitized_m = _sanitize_input(m)
             if not sanitized_m:
                 hist.append([m, "Please provide a message."])
                 yield hist, gr.Textbox(value="")
                 return
-
+
             # Convert history to the required 'messages' format and append the new user message.
-            messages_hist = gr.Chatbot.postprocess(hist) if hist else [] # `postprocess` handles the conversion
+            # This is the definitive fix for the data format error.
+            messages_hist = gr.Chatbot.postprocess(hist) if hist else [] # `postprocess` handles the conversion
             messages_hist.append({"role": "user", "content": sanitized_m})
-            yield messages_hist, gr.Textbox(value="", interactive=False) # Show user message immediately, disable textbox
-
-            hive_instance = get_hive_instance() # type: ignore
+            yield messages_hist, gr.Textbox(value="", interactive=False) # Show user message immediately, disable textbox
 
+            hive_instance = get_hive_instance() # type: ignore
+
             # Use the appropriate chat method based on the current mode.
             if hive_instance.lite_mode:
                 # Lite mode uses a direct, non-streaming chat call for immediate response.
-                reply = hive_instance.chat(sanitized_m, eff, current_user_id) # type: ignore
+                reply = hive_instance.chat(sanitized_m, eff, current_user_id) # type: ignore
                 messages_hist.append({"role": "assistant", "content": reply})
                 yield messages_hist, gr.Textbox(value="", interactive=True)
             else:
                 # Full mode uses the DialogueManager for a streaming response.
                 full_reply = ""
-                messages_hist.append({"role": "assistant", "content": ""}) # Placeholder for the stream
-                try: # type: ignore
-                    for chunk in hive_instance.dialogue_manager.process_turn(sanitized_m, current_user_id, eff, session_id): # type: ignore
+                messages_hist.append({"role": "assistant", "content": ""}) # Placeholder for the stream
+                try: # type: ignore
+                    for chunk in hive_instance.dialogue_manager.process_turn(sanitized_m, current_user_id, eff, session_id): # type: ignore
                         if chunk["type"] == "token":
                             full_reply += chunk["content"]
                             messages_hist[-1]["content"] = full_reply
-                            yield messages_hist, gr.Textbox(value="", interactive=False) # Keep disabled during stream
+                            yield messages_hist, gr.Textbox(value="", interactive=False) # Keep disabled during stream
                         elif chunk["type"] == "final":
                             messages_hist[-1]["content"] = chunk["content"]
-                            yield messages_hist, gr.Textbox(value="", interactive=True) # Re-enable when done
+                            yield messages_hist, gr.Textbox(value="", interactive=True) # Re-enable when done
                 except Exception as e:
                     error_msg = f"Error in DialogueManager: {e}"
                     print(f"[ERROR] {error_msg}")
                     messages_hist[-1]["content"] = f"An error occurred: {error_msg}"
                     yield messages_hist, gr.Textbox(value="", interactive=True)
 
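talk is a generator, which is how Gradio streams: every yield pushes an updated (chatbot, textbox) pair to the browser. A minimal sketch of the same pattern, independent of Hive (the handler names here are illustrative):

    import time
    import gradio as gr

    def stream_reply(message, history):
        # Generator handler: each yield re-renders the Chatbot with the partial reply
        history = (history or []) + [{"role": "user", "content": message},
                                     {"role": "assistant", "content": ""}]
        for word in "this reply streams word by word".split():
            history[-1]["content"] += word + " "
            time.sleep(0.1)
            yield history, ""

    with gr.Blocks() as demo:
        chat = gr.Chatbot(type="messages")
        box = gr.Textbox()
        box.submit(stream_reply, [box, chat], [chat, box])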
- # Note: `msg`, `chatbot`, and other UI elements referenced here must be
2134
- # defined elsewhere in the surrounding `gr.Blocks` layout. Ensure the
2135
- # top-level UI includes those widgets when composing the full interface.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2136
 
2137
- demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT")) if os.environ.get("PORT") else None, share=False)
2138
- return demo
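The hands-free handler keeps TTS latency low by speaking each complete sentence as soon as the regex finds one, instead of waiting for the full reply. The chunking idea in isolation (this sketch slices by match.end(), a simpler variant of the handler's strip-and-slice):

    import re

    buffer = ""
    for token in ["Hello", " world", ".", " How", " are", " you", "?"]:   # simulated token stream
        buffer += token
        match = re.search(r'([^.!?]+[.!?])', buffer)
        if match:
            print("speak:", match.group(0).strip())   # hand one sentence to TTS right away
            buffer = buffer[match.end():]
    if buffer.strip():
        print("speak:", buffer.strip())               # flush any trailing fragment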
+        # --- Wake Word Detection Logic ---
+        porcupine_instance = None
+        if _HAVE_PVP and CFG.get("PVPORCUPINE_ACCESS_KEY"): # type: ignore
+            keyword_paths: List[str] = []
+            keywords = [k.strip() for k in CFG["HIVE_WAKE_WORDS"].split(',') if k.strip()] # type: ignore
+
+            for keyword in keywords:
+                custom_path = os.path.join(CFG["HIVE_HOME"], "keywords", f"{keyword}_{_os_name()}.ppn")
+                if os.path.exists(custom_path):
+                    keyword_paths.append(custom_path)
+                elif keyword in pvporcupine.BUILTIN_KEYWORDS: # type: ignore
+                    keyword_paths.append(keyword)
+
+            if not keyword_paths: keyword_paths = ['bumblebee']
+
+            try:
+                porcupine_instance = pvporcupine.create( # type: ignore
+                    access_key=CFG["PVPORCUPINE_ACCESS_KEY"], # type: ignore
+                    keyword_paths=keyword_paths
+                )
+                print(f"[WakeWord] Listening for: {keywords}")
+            except Exception as e:
+                print(f"[WakeWord] Error initializing Porcupine: {e}. Wake word will be disabled.")
+                porcupine_instance = None
+
+        # Auto-start wake word listener on Pi
+        is_pi = 'raspberrypi' in platform.machine().lower()
+        if is_pi and porcupine_instance:
+            print("[WakeWord] Raspberry Pi detected. Wake word listener is always on.")
+
+        def process_wake_word_stream(stream, ww_state, vc_state, request: gr.Request): # type: ignore
+            if not porcupine_instance or stream is None or vc_state.get("active", False):
+                return ww_state, vc_state, "Status: Inactive", gr.Button(value="Start Hands-Free Conversation")
+
+            sampling_rate, audio_chunk = stream
+            # Porcupine expects 16-bit integers
+            audio_int16 = (audio_chunk * 32767).astype(np.int16)
+            ww_state["buffer"] += audio_int16.tobytes()
+
+            frame_length = porcupine_instance.frame_length # type: ignore
+            while len(ww_state["buffer"]) >= frame_length * 2: # 2 bytes per int16
+                frame_bytes = ww_state["buffer"][:frame_length * 2]
+                ww_state["buffer"] = ww_state["buffer"][frame_length * 2:]
+                frame = struct.unpack_from("h" * frame_length, frame_bytes)
+
+                keyword_index = porcupine_instance.process(frame) # type: ignore
+                if keyword_index >= 0:
+                    print(f"[WakeWord] Detected wake word! Activating hot mic.")
+                    vc_state["active"] = True
+                    vc_state["last_interaction_time"] = time.time() # Start conversation timer
+                    status_text = "Status: Wake word detected! Listening for command..."
+                    return ww_state, vc_state, status_text, gr.Button(value="Stop Vocal Chat")
+            return ww_state, vc_state, "Status: Inactive", gr.Button(value="Start Hands-Free Conversation")
+
+        if porcupine_instance:
+            wake_word_mic.stream(process_wake_word_stream, [wake_word_mic, wake_word_state, vocal_chat_state], [wake_word_state, vocal_chat_state, vocal_chat_status, vocal_chat_btn])
+
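The wake-word handler's byte math follows from Porcupine's API: process() consumes exactly frame_length 16-bit samples, so the buffer is drained in frame_length * 2-byte slices. A standalone illustration of that framing (synthetic silence instead of microphone audio; the real engine additionally needs an access key):

    import struct
    import numpy as np

    frame_length = 512                                    # typical Porcupine frame_length
    buffer = np.zeros(1300, dtype=np.int16).tobytes()     # synthetic audio, 1300 samples

    frames = []
    while len(buffer) >= frame_length * 2:                # 2 bytes per int16 sample
        frame_bytes, buffer = buffer[:frame_length * 2], buffer[frame_length * 2:]
        frames.append(struct.unpack_from("h" * frame_length, frame_bytes))

    print(len(frames), "full frames;", len(buffer) // 2, "samples left over")  # 2 frames; 276 left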
+        def is_admin(mode, role): return (mode == "admin") and (role in ("admin_general", "admin_super", "owner"))
+
+        def do_add(mode, role, caller, nm, rl, pw): # type: ignore
+            if not is_admin(mode, role): return "Switch to Admin mode to use this."
+            d=_load_users(); cu,_=_find_user(d, caller or "")
+            if not cu: return "Login first as admin."
+            if rl not in PERMS.get(cu["role"],{}).get("can_add",[]): return f"{cu['role']} cannot add {rl}."
+            uid=f"{rl}:{int(time.time())}"
+            entry={"id":uid,"name":nm,"role":rl,"pass":pw if rl!='user' else "", "prefs":{"activation_names":[CFG["AGENT_NAME"]],"language":"en"}} # type: ignore
+            if rl=="owner":
+                for group in ["admins_super", "admins_general", "users"]:
+                    d[group] = [u for u in d.get(group, []) if u.get("id") != d.get("owner", {}).get("id")]
+                d["owner"] = entry
+            elif rl=="admin_super": d["admins_super"].append(entry)
+            elif rl=="admin_general": d["admins_general"].append(entry)
+            else: d["users"].append(entry)
+            _save_json(USERS_DB,d); return f"Added {rl}: {nm}"
+        add_btn.click(do_add, [mode_state, role_state, uid_state, add_name, add_role, add_pass], [out_add])
+
+        def do_rename(mode, role, caller, tgt, nm): # type: ignore
+            if not is_admin(mode, role): return "Switch to Admin mode to use this."
+            d=_load_users(); u,_=_find_user(d, tgt or "")
+            if not u: return "Target not found."
+            cu,_=_find_user(d, caller or "")
+            if not cu: return "Login first."
+            if u.get("role") in PERMS.get(cu.get("role"),{}).get("can_edit_profile_of",[]):
+                u["name"]=nm; _save_json(USERS_DB,d); return "Renamed."
+            return "Not allowed."
+        rename_btn.click(do_rename,[mode_state, role_state, uid_state, target, new_name],[out])
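do_add enforces the PERMS matrix from the removed RBAC block: a role may only create the roles listed under its can_add key, so an admin_general can add plain users but not other admins. The check in isolation (matrix trimmed to two roles for brevity):

    PERMS = {
        "admin_general": {"can_add": ["user"]},
        "admin_super":   {"can_add": ["admin_general", "user"]},
    }

    def can_add(actor_role: str, new_role: str) -> bool:
        return new_role in PERMS.get(actor_role, {}).get("can_add", [])

    assert can_add("admin_super", "admin_general")
    assert not can_add("admin_general", "admin_general")   # rejected by do_add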
 
 class Bootstrap:
     """Handles the entire application startup sequence cleanly."""
@@ -2149,6 +2407,7 @@ class Bootstrap:
         self.voice_ready = threading.Event()
         self.env: Optional[Dict] = None
         self.app: Optional[gr.Blocks] = None
 
         self.ui_thread: Optional[threading.Thread] = None
 
     def initialize_persistent_storage(self, base_path: str):
@@ -2160,6 +2419,18 @@ class Bootstrap:
         if not (root / "system" / "config.json").exists():
             _save_json(root / "system" / "config.json", {"note": "Default config created by Bootstrap."})
 
     def run(self):
         """Executes the full startup sequence."""
         print("[Bootstrap] Starting Hive System...")
@@ -2173,53 +2444,44 @@ class Bootstrap:
             print("[Bootstrap] Low memory detected, enabling ultra-constrained mode.")
             self.config["CTX_TOKENS"] = min(self.config.get("CTX_TOKENS", 2048), 1024)
 
-        print("[Bootstrap] Initializing Lite Hive core...")
-        Hive.bootstrap_instance = self
-        if self.caps.get("is_low_memory"):
-            print("[Bootstrap] Low memory mode: Lite instance will be minimal.")
-        self.hive_lite_instance = Hive(lite=True, caps=self.caps)
-
-        self.video_service = VideoService()
-        self.video_service.start()
-        print("[Bootstrap] Lite Hive core is ready.")
 
         def full_init_task():
-            try:
-                print("[Bootstrap] Initializing Full Hive core in background...")
-                Hive.bootstrap_instance = self
-                self.hive_instance = Hive(lite=False, caps=self.caps)
-                self.hive_ready.set()
-                print("[Bootstrap] Full Hive core is ready.")
-            except Exception as e:
-                print(f"[ERROR] Full Hive core initialization failed: {e}")
-                # Optionally, set hive_ready to indicate failure or a partial state
-                # For now, we'll just log and leave hive_ready unset,
-                # which means UI elements waiting for it won't activate.
 
         def voice_init_task():
             """Initializes voice models in a separate thread."""
-            print("[Bootstrap] Initializing Voice models (ASR & TTS) in parallel...")
-
-            asr_thread = threading.Thread(target=get_asr, daemon=True)
-            tts_thread = threading.Thread(target=get_tts, args=(CFG["TTS_LANG"],), daemon=True)
-
-            asr_thread.start()
-            tts_thread.start()
-
-            asr_thread.join()
-            tts_thread.join()
-
             self.voice_ready.set()
-            print("[Bootstrap] Voice models are ready.")
 
-        threading.Thread(target=self.setup_memory, daemon=True).start()
-        self.ui_thread = threading.Thread(target=self.launch, daemon=True)
-        threading.Thread(target=voice_init_task, daemon=True).start()
-        threading.Thread(target=full_init_task, daemon=True).start()
 
         import signal
         signal.signal(signal.SIGINT, self.graceful_shutdown)
         signal.signal(signal.SIGTERM, self.graceful_shutdown)
-        self.launch()
 
     def soft_restart(self):
         """Performs a hot-reload of the application logic without restarting the process."""
@@ -2227,7 +2489,7 @@ class Bootstrap:
         self.hive_ready.clear()
         if self.hive_instance:
            self.hive_instance.module_manager.stop_all()
-        if self.app:
            self.app.close()
            self.ui_thread.join(timeout=5.0)
1458
  class LibrarianModule(LibrarianCurve, IModule):
1459
  def __init__(self, hive_instance: "Hive"):
1460
  IModule.__init__(self, hive_instance)
 
1461
 
1462
  class VoiceServicesModule(IModule):
1463
  def __init__(self, hive_instance: "Hive"):
 
1477
  # ----------- Hive core -----------
1478
  # --- Memory & Manifest Helpers (auto-inserted) ---
1479
  import tempfile, urllib.request, tarfile, zipfile
1480
+ from collections import OrderedDict
1481
+ from concurrent.futures import ThreadPoolExecutor
1482
  import importlib
1483
  from pathlib import Path as _Path
1484
 
 
1528
 
1529
  return f"{head} {insight}\n\nUser: {final_instruction}\nAssistant:"
1530
 
1531
+ class KnowledgeStoreModule(KnowledgeStore, IModule):
1532
+ def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance); KnowledgeStore.__init__(self, hive_instance.config["HIVE_HOME"])
1533
+ def start(self): pass
1534
+ def stop(self): pass
1535
+
1536
+ class CurveStoreModule(CurveStore, IModule):
1537
+ def __init__(self, hive_instance: "Hive"):
1538
+ IModule.__init__(self, hive_instance)
1539
+ CurveStore.__init__(self, hive_instance.config["CURVE_DIR"])
1540
+ def start(self): pass
1541
+ def stop(self): pass
1542
+
1543
+ class LibrarianModule(LibrarianCurve, IModule):
1544
+ def __init__(self, hive_instance: "Hive"):
1545
+ IModule.__init__(self, hive_instance)
1546
+ LibrarianCurve.__init__(self, hive_instance.store, hive_instance.k_store)
1547
+
1548
+ class EngineModule(EngineCurve, IModule):
1549
+ def __init__(self, hive_instance: "Hive"):
1550
+ IModule.__init__(self, hive_instance)
1551
+ EngineCurve.__init__(self)
1552
+ def start(self): pass
1553
+ def stop(self): pass
1554
+
1555
+ class OverlayModule(RuntimeOverlay, IModule):
1556
+ def __init__(self, hive_instance: "Hive"):
1557
+ IModule.__init__(self, hive_instance)
1558
+ RuntimeOverlay.__init__(self)
1559
+ def start(self): self.apply_to(self.hive)
1560
+ def stop(self): pass
1561
+
1562
+ class CompilerModule(PromptCompiler, IModule):
1563
+ def __init__(self, hive_instance: "Hive"): IModule.__init__(self, hive_instance); PromptCompiler.__init__(self); hive_instance.decoding_temperature=0.7
1564
+ def start(self): pass
1565
+ def stop(self): pass
1566
+
1567
  class Hive:
1568
  def __init__(self, model_id: Optional[str]=None, device: Optional[str]=None, caps: Optional[Dict]=None, lite: bool = False):
1569
  self.config = CFG
 
1571
  self.lite_mode = lite
1572
  self.module_manager = ModuleManager()
1573
  Hive.bootstrap_instance = None # Class attribute to hold bootstrap instance
1574
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1575
  if not model_id:
1576
  model_id, info = pick_model(self.caps)
1577
  device = info.get("device", "cpu")
 
1590
 
1591
  def _init_full_mode(self):
1592
  """Initializes the Hive in full-featured mode."""
1593
+ print("[Hive] Initializing in Full Mode (parallel module loading).")
1594
+
1595
+ # Modules with dependencies are loaded first, sequentially.
1596
+ self.module_manager.register("kstore", KnowledgeStoreModule(self)) # Depends on nothing
1597
+ self.module_manager.register("store", CurveStoreModule(self)) # Depends on nothing
1598
+ self.module_manager.register("librarian", LibrarianModule(self)) # Depends on kstore, store
1599
+ self.module_manager.register("compiler", CompilerModule(self)) # Depends on nothing
1600
+
1601
+ # Independent modules can be loaded in parallel to speed up startup.
1602
+ with ThreadPoolExecutor(max_workers=4) as executor:
1603
+ futures = {
1604
+ executor.submit(lambda: self.module_manager.register("engine", EngineModule(self))),
1605
+ executor.submit(lambda: self.module_manager.register("overlay", OverlayModule(self))),
1606
+ executor.submit(lambda: self.module_manager.register("changes", ChangeManagerModule(self))),
1607
+ executor.submit(lambda: self.module_manager.register("voice_video", VoiceServicesModule(self))),
1608
+ executor.submit(lambda: self.module_manager.register("persistence", PersistenceEngine(self))),
1609
+ executor.submit(lambda: self.module_manager.register("selfopt", SelfOptimizerModule(self))),
1610
+ }
1611
+ for future in futures:
1612
+ future.result() # Wait for all parallel tasks to complete
1613
 
1614
+ self.dialogue_manager = DialogueManager(self)
1615
  self._setup_llm_pipeline()
 
1616
  self.module_manager.start_all()
1617
 
1618
  def _load_local_model(self, trust: bool, **kwargs):
 
1805
  yield "[Error: Local model is not available]"
1806
  return
1807
  from transformers import TextIteratorStreamer
 
 
1808
  streamer = TextIteratorStreamer(self.tok, skip_prompt=True, skip_special_tokens=True)
1809
+ inputs = self.tok([prompt], return_tensors="pt").to(self.device)
1810
+ generation_kwargs = dict(
1811
+ inputs,
1812
+ streamer=streamer,
1813
+ max_new_tokens=max_new_tokens,
1814
+ do_sample=True,
1815
+ temperature=temperature,
1816
+ stopping_criteria=self.stopping_criteria
1817
+ )
1818
+ thread = threading.Thread(target=self.model.generate, kwargs=generation_kwargs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1819
  thread.start()
 
1820
  for new_text in streamer:
1821
  yield new_text # type: ignore
1822
 
1823
+ def chat(self, message:str, effective_role:str, caller_id: Optional[str],
1824
+ k:int=None, max_new_tokens:int=256, temperature:float=None, prompt_override: Optional[str] = None) -> str: # type: ignore
1825
+ temp = temperature if temperature is not None else (self.decoding_temperature if not self.lite_mode else 0.7)
1826
+
1827
+ user_obj, _ = _find_user(_load_users(), caller_id)
1828
+ user_prefs = (user_obj.get("prefs", {}) or {}) if user_obj else {}
1829
+ user_lang = user_prefs.get("language", "en")
1830
+ phonics_on = user_prefs.get("phonics_on", False)
1831
+
1832
+ final_message, intent = self._prepare_chat_input(message, user_lang, phonics_on, prompt_override) # type: ignore
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1833
 
1834
+ if self.lite_mode:
1835
+ prompt = f"User: {final_message}\nAssistant:"
1836
+ full_reply = "".join(list(self.chat_stream(prompt, max_new_tokens, temp)))
1837
+ return full_reply.rsplit("Assistant:", 1)[-1].strip()
 
 
 
 
 
 
 
1838
 
1839
+ kk = k if k is not None else (self.retrieval_k if hasattr(self, 'retrieval_k') else 6)
1840
+ snippets = self._get_retrieval_context(message, effective_role, caller_id, kk) # type: ignore
 
 
 
 
 
 
 
 
1841
 
1842
+ prompt = self.compiler.compile( # type: ignore
1843
+ final_message,
1844
+ snippets,
1845
+ token_budget=int(CFG["CTX_TOKENS"]),
1846
+ intent=intent,
1847
+ user_lang=user_lang
1848
+ )
1849
+
1850
+ full_output = "".join(list(self.chat_stream(prompt, max_new_tokens, temp))) # type: ignore
1851
+ self.engine.run(message, snippets)
1852
+
1853
+ return self._postprocess_and_log(full_output, message, effective_role, caller_id, intent, snippets)
1854
+
1855
+ # --------------- UI ---------------
1856
+ HELP=f"""
1857
+ **Admin/User mode**: Admins (general/super) and Owner log in with password (Owner also needs second factor). After login choose Admin or User mode.
1858
+ **Owner-only code edits** are enforced via Change Manager policy. Hive can sandbox, test, and propose; code writes require Owner approval (`OPT_AUTO_APPLY=1`) unless Owner applies manually.
1859
+ **Offline/Online**: Works fully offline from curves. If online and enabled, fetches RSS/web snippets ➡️ summarizes locally ➡️ saves to curves (persists offline).
1860
+ **Voice**: Faster-Whisper ASR (auto language), Piper TTS mixed-language, phonics hints (English).
1861
+ **Privacy**: Sensitive/first-person inputs route to user-private library; neutral info to general.
1862
+ """
1863
 
1864
+ def launch_ui(bootstrap_instance: "Bootstrap"):
1865
+ HIVE_INSTANCE: Optional[Hive] = None
1866
+ def get_hive_instance():
1867
+ nonlocal HIVE_INSTANCE
1868
+
1869
+ # If the full hive is ready, ensure we are using it.
1870
+ if bootstrap_instance.hive_ready.is_set():
1871
+ if HIVE_INSTANCE is None or HIVE_INSTANCE.lite_mode:
1872
+ HIVE_INSTANCE = bootstrap_instance.hive_instance
1873
+ print("[UI] Full Hive instance attached.")
1874
+ return HIVE_INSTANCE
1875
+
1876
+ # Otherwise, use the lite instance.
1877
+ if HIVE_INSTANCE is None:
1878
+ HIVE_INSTANCE = bootstrap_instance.hive_lite_instance
1879
+ print("[UI] Using Lite Hive instance while full core initializes.")
1880
+ return HIVE_INSTANCE
1881
+
1882
+ with gr.Blocks(title="Hive 🐝") as demo:
1883
+ with gr.Row():
1884
+ with gr.Column(scale=3):
1885
+ gr.Markdown(f"## {CFG['AGENT_NAME']} 🐝")
1886
+ core_status = gr.Markdown("⏳ **Initializing Full Hive Core...** You can chat with the Lite model now. Advanced features will be enabled shortly.") # type: ignore
1887
+ chatbot = gr.Chatbot(height=600, type="messages", label="Chat")
1888
+ msg = gr.Textbox(placeholder=f"Talk to {CFG['AGENT_NAME']} (Lite Mode)", interactive=True, show_label=False, container=False, scale=4)
1889
+
1890
+ with gr.Column(scale=1, min_width=300):
1891
+ with gr.Sidebar():
1892
+ uid_state=gr.State(None); role_state=gr.State("guest"); mode_state=gr.State("user"); phonics_state=gr.State(False) # type: ignore
1893
+
1894
+ with gr.Accordion("Login & Profile", open=True):
1895
+ login_name=gr.Textbox(label="Name or ID")
1896
+ login_pass=gr.Textbox(label="Password (if required)", type="password")
1897
+ login_second=gr.Textbox(label="Second (owner only)", type="password")
1898
+ login_btn=gr.Button("Login")
1899
+ login_status=gr.Markdown(elem_id="login_status") # type: ignore
1900
+ profile_status = gr.Markdown("Login to see your profile.")
1901
+ profile_save_btn = gr.Button("Save Profile")
1902
+
1903
+ with gr.Accordion("🌐 Language Preference", open=False):
1904
+ profile_lang = gr.Dropdown(choices=["en","es","fr","de","zh"], label="Preferred Language", value="en")
1905
+
1906
+ with gr.Accordion("🗣️ Phonics Assist", open=False):
1907
+ gr.Markdown("Enable to get phonetic hints for English words when using the 'pronounce' command.")
1908
+ profile_phonics = gr.Checkbox(label="Enable Phonics Assist (for English)")
1909
+
1910
+ with gr.Accordion("🧠 Memory & Vocabulary", open=False):
1911
+ summary_output = gr.Markdown("Initializing... (Full core required, est. 1-2 min)")
1912
+ summary_btn = gr.Button("Show Memory Summary", interactive=False)
1913
+ vocab_output = gr.Markdown("---")
1914
+ vocab_btn = gr.Button("Get New Word", interactive=False)
1915
+ progress_output = gr.Markdown("---")
1916
+
1917
+ with gr.Accordion("🗣️ Voice & Hands-Free", open=False, visible=True) as voice_accordion:
1918
+ voice_status_md = gr.Markdown("Initializing voice models... (Est. 30-60 sec)")
1919
+ with gr.Tabs() as voice_tabs:
1920
+ with gr.TabItem("Push-to-Talk"):
1921
+ ptt_audio_in = gr.Audio(sources=["microphone"], type="filepath", label="1. Record your message", interactive=False)
1922
+ ptt_transcript = gr.Textbox(label="2. Transcript / Your Message", interactive=False)
1923
+ with gr.Row():
1924
+ ptt_transcribe_btn = gr.Button("Transcribe Only", interactive=False)
1925
+ ptt_chat_btn = gr.Button("Send to Chat & Get Voice Reply", variant="primary", interactive=False)
1926
+ ptt_reply_audio = gr.Audio(type="filepath", label="3. Assistant's Voice Reply", autoplay=True)
1927
+ with gr.TabItem("Hands-Free"):
1928
+ vocal_chat_state = gr.State({"active": False, "audio_buffer": b'', "last_interaction_time": 0, "conversation_timeout": 10.0})
1929
+ vocal_chat_btn = gr.Button("Start Hands-Free Conversation", interactive=False)
1930
+ vocal_chat_status = gr.Markdown("Status: Inactive")
1931
+ vocal_mic = gr.Audio(sources=["microphone"], streaming=True, visible=False, autoplay=True)
1932
+ wake_word_mic = gr.Audio(sources=["microphone"], streaming=True, visible=False, autoplay=False, elem_id="wake_word_mic")
1933
+ wake_word_state = gr.State({"buffer": b""})
1934
+ with gr.TabItem("Voice Login"):
1935
+ gr.Markdown("Enroll your voice to enable password-free login for user accounts.")
1936
+ enroll_audio = gr.Audio(sources=["microphone"], type="filepath", label="Record 5-10s for voiceprint", interactive=False)
1937
+ with gr.Row():
1938
+ enroll_btn = gr.Button("Enroll Voice for Current User", interactive=False)
1939
+ enroll_status = gr.Markdown()
1940
+ gr.Markdown("---")
1941
+ gr.Markdown("After enrolling, you can log in by recording your voice here.")
1942
+ with gr.Row():
1943
+ who_btn = gr.Button("Login by Voice", interactive=False)
1944
+ who_status = gr.Markdown()
1945
+
1946
+ with gr.Accordion("📸 Camera", open=False, visible=True) as camera_accordion:
1947
+ camera_status_md = gr.Markdown("Camera feature disabled or initializing...")
1948
+ video_out = gr.Image(label="Camera", type="pil", interactive=False)
1949
+
1950
+ with gr.Accordion("🌐 Network", open=False, visible=True) as network_accordion:
1951
+ network_status_md = gr.Markdown("Initializing network features...")
1952
+ wifi_status=gr.Markdown("Wi-Fi: checking...")
1953
+ connect_now=gr.Button("Try auto-connect now (non-blocking)")
1954
+ online_now=gr.Button("Fetch updates now", interactive=False)
1955
+ online_status=gr.Markdown()
1956
+
1957
+ with gr.Accordion("⚙️ Admin Console", open=False, visible=True) as admin_accordion:
1958
+ admin_info=gr.Markdown("Login as an admin and switch to Admin mode to use these tools.")
1959
+ mode_picker=gr.Radio(choices=["user","admin"], value="user", label="Mode (admins only)")
1960
+ with gr.Tabs() as admin_tabs:
1961
+ with gr.TabItem("User Management"):
1962
+ target=gr.Textbox(label="Target name or id")
1963
+ new_name=gr.Textbox(label="New name")
1964
+ rename_btn=gr.Button("Rename")
1965
+ new_pass=gr.Textbox(label="New password")
1966
+ pass_btn=gr.Button("Change password")
1967
+ new_role=gr.Dropdown(choices=["owner","admin_super","admin_general","user"], value="user", label="New role")
1968
+ role_btn=gr.Button("Change role", elem_id="role_btn")
1969
+ out=gr.Markdown()
1970
+ with gr.TabItem("Add User"):
1971
+ add_name=gr.Textbox(label="Add: name")
1972
+ add_role=gr.Dropdown(choices=["admin_super","admin_general","user"], value="user", label="Add role")
1973
+ add_pass=gr.Textbox(label="Add password (admins only)")
1974
+ add_btn=gr.Button("Add user/admin")
1975
+ out_add=gr.Markdown()
1976
+ with gr.TabItem("System"):
1977
+ ingest_status = gr.Markdown("Memory Ingestion: Idle")
1978
+ ingest_now_btn = gr.Button("Start Background Ingestion", interactive=False)
1979
+ mem_compress_btn=gr.Button("Compress Memory (archive)", interactive=False)
1980
+ compress_status=gr.Markdown("")
1981
+ hotpatch_patch=gr.Code(label="Paste hotpatch JSON (advanced)")
1982
+ hotpatch_status=gr.Markdown("Awaiting patch")
1983
+ hotpatch_apply=gr.Button("Apply Hotpatch", elem_id="hotpatch_apply", interactive=False)
1984
+ with gr.TabItem("Optimization"):
1985
+ gr.Markdown("### Internal Optimization (Change Manager)")
1986
+ prop_kind=gr.Dropdown(choices=["model","package","code"], value="model", label="Proposal type")
1987
+ prop_name=gr.Textbox(label="Model ID / Package Name")
1988
+ prop_ver=gr.Textbox(label="Package version (optional)")
1989
+ prop_reason=gr.Textbox(label="Why this change?")
1990
+ prop_patch=gr.Code(label="Code patch (for 'code' proposals): paste full replacement or diff")
1991
+ propose_btn=gr.Button("Propose", interactive=False)
1992
+ test_btn=gr.Button("Test in sandbox", interactive=False)
1993
+ apply_btn=gr.Button("Apply (policy-checked)", elem_id="apply_btn", interactive=False)
1994
+ opt_out=gr.JSON(label="Result")
1995
+
1996
+ # --- Event Handlers ---
1997
 
1998
  def _sanitize_input(text: str) -> str:
1999
  """Removes control characters and leading/trailing whitespace."""
2000
+ if not text: return ""
 
2001
  return "".join(ch for ch in text if unicodedata.category(ch)[0] != "C").strip()
2002
+
2003
+ def talk(m, uid, role, mode, hist, request: gr.Request): # type: ignore
2004
  eff = role if mode == "admin" else "user"
2005
  session_id = request.session_hash
2006
  # Use session_id for guests, uid for logged-in users # type: ignore
2007
  current_user_id = uid or session_id
2008
+
2009
  sanitized_m = _sanitize_input(m)
2010
  if not sanitized_m:
2011
  hist.append([m, "Please provide a message."])
2012
  yield hist, gr.Textbox(value="")
2013
  return
2014
+
2015
  # Convert history to the required 'messages' format and append the new user message.
2016
+ # This is the definitive fix for the data format error.
2017
+ messages_hist = gr.Chatbot.postprocess(hist) if hist else [] # `postprocess` handles the conversion
2018
  messages_hist.append({"role": "user", "content": sanitized_m})
2019
+ yield messages_hist, gr.Textbox(value="", interactive=False) # Show user message immediately, disable textbox
 
 
2020
 
2021
+ hive_instance = get_hive_instance() # type: ignore
2022
+
2023
  # Use the appropriate chat method based on the current mode.
2024
  if hive_instance.lite_mode:
2025
  # Lite mode uses a direct, non-streaming chat call for immediate response.
2026
+ reply = hive_instance.chat(sanitized_m, eff, current_user_id) # type: ignore
2027
  messages_hist.append({"role": "assistant", "content": reply})
2028
  yield messages_hist, gr.Textbox(value="", interactive=True)
2029
  else:
2030
  # Full mode uses the DialogueManager for a streaming response.
2031
  full_reply = ""
2032
+ messages_hist.append({"role": "assistant", "content": ""}) # Placeholder for the stream
2033
+ try: # type: ignore
2034
+ for chunk in hive_instance.dialogue_manager.process_turn(sanitized_m, current_user_id, eff, session_id): # type: ignore
2035
  if chunk["type"] == "token":
2036
  full_reply += chunk["content"]
2037
  messages_hist[-1]["content"] = full_reply
2038
+ yield messages_hist, gr.Textbox(value="", interactive=False) # Keep disabled during stream
2039
  elif chunk["type"] == "final":
2040
  messages_hist[-1]["content"] = chunk["content"]
2041
+ yield messages_hist, gr.Textbox(value="", interactive=True) # Re-enable when done
2042
  except Exception as e:
2043
  error_msg = f"Error in DialogueManager: {e}"
2044
  print(f"[ERROR] {error_msg}")
2045
  messages_hist[-1]["content"] = f"An error occurred: {error_msg}"
2046
  yield messages_hist, gr.Textbox(value="", interactive=True)
2047
 
2048
+ msg.submit(talk, [msg, uid_state, role_state, mode_state, chatbot], [chatbot, msg], api_name="chat")
2049
+
2050
+ def do_memory_summary(uid, request: gr.Request):
2051
+ hive_instance = get_hive_instance() # type: ignore
2052
+ if hive_instance.lite_mode: return "Memory features are disabled in Lite Mode." # type: ignore
2053
+ current_user_id = uid or request.session_hash
2054
+ log_path = os.path.join(CFG["HIVE_HOME"], "users", "conversations", f"{current_user_id}.jsonl")
2055
+ if not os.path.exists(log_path): return "No conversation history found."
2056
+ try: # type: ignore
2057
+ with open(log_path, "r", encoding="utf-8") as f:
2058
+ lines = f.readlines()[-10:]
2059
+ if not lines: return "Not enough conversation history to summarize." # type: ignore
2060
+ text_to_summarize = "\n".join([json.loads(line).get("message", "") + "\n" + json.loads(line).get("reply", "") for line in lines])
2061
+ summary = hive_instance.summarize_for_memory(text_to_summarize)
2062
+ return summary if summary.strip() else "Could not generate a summary from recent conversations."
2063
+ except Exception as e: return f"Error generating summary: {e}"
2064
+ summary_btn.click(do_memory_summary, [uid_state], [summary_output])
2065
+
2066
+ def do_get_vocab_word(uid, request: gr.Request):
2067
+ hive_instance = get_hive_instance() # type: ignore
2068
+ if hive_instance.lite_mode: return "Vocabulary features are disabled in Lite Mode." # type: ignore
2069
+ current_user_id = uid or request.session_hash
2070
+ log_path = os.path.join(CFG["HIVE_HOME"], "users", "conversations", f"{current_user_id}.jsonl")
2071
+ if not os.path.exists(log_path): return "No conversation history to find words from."
2072
+ try:
2073
+ with open(log_path, "r", encoding="utf-8") as f:
2074
+ content = f.read()
2075
+ words = [w for w in re.findall(r'\b\w{7,}\b', content.lower()) if w not in ["assistant", "message"]]
2076
+ if not words: return "No challenging words found yet. Keep chatting!" # type: ignore
2077
+ word = random.choice(words)
2078
+ definition = hive_instance.chat(f"What is the definition of the word '{word}'? Provide a simple, clear definition and one example sentence.", "user", current_user_id)
2079
+ return f"**{word.capitalize()}**: {definition}"
2080
+ except Exception as e: return f"Error getting vocabulary word: {e}"
2081
+
2082
+ def wait_for_memory_features():
2083
+ """Waits for the full Hive core and enables memory-related UI features."""
2084
+ bootstrap_instance.hive_ready.wait()
2085
+ hive_instance = get_hive_instance() # Ensure the UI's HIVE_INSTANCE is updated to full
2086
+ return (
2087
+ "✅ **Full Hive Core is Ready.** Advanced features are now online.",
2088
+ "Click the button to generate a summary of your recent conversations.",
2089
+ gr.Button(interactive=True),
2090
+ "Click to get a new vocabulary word from your conversations.",
2091
+ gr.Button(interactive=True),
2092
+ "Your progress will be shown here. Click the button to update.",
2093
+ # Enable other advanced feature buttons
2094
+ gr.Button(interactive=True), # online_now
2095
+ gr.Button(interactive=True), # ingest_now_btn
2096
+ gr.Button(interactive=True), # mem_compress_btn
2097
+ gr.Button(interactive=True), # hotpatch_apply
2098
+ gr.Button(interactive=True), # propose_btn
2099
+ gr.Button(interactive=True), # test_btn
2100
+ gr.Button(interactive=True), # apply_btn
2101
+ )
2102
+ demo.load(wait_for_memory_features, None, [core_status, summary_output, summary_btn, vocab_output, vocab_btn, progress_output, online_now, ingest_now_btn, mem_compress_btn, hotpatch_apply, propose_btn, test_btn, apply_btn, network_status_md])
2103
+ vocab_btn.click(do_get_vocab_word, [uid_state], [vocab_output]) # type: ignore
2104
+
2105
+
2106
+ def wait_for_voice_features(request: gr.Request):
2107
+ """Waits for ASR/TTS models and enables voice-related UI elements."""
2108
+ bootstrap_instance.voice_ready.wait()
2109
+ hive_instance = get_hive_instance()
2110
+
2111
+ voice_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'asr_service') and hasattr(hive_instance, 'tts_service')
2112
+ video_ready = not hive_instance.lite_mode and hasattr(hive_instance, 'video_service') and CFG["VIDEO_ENABLED"]
2113
+
2114
+ return (
2115
+ gr.Markdown("✅ Voice models ready.", visible=voice_ready),
2116
+ gr.Audio(interactive=voice_ready), # ptt_audio_in
2117
+ gr.Textbox(interactive=voice_ready), # ptt_transcript
2118
+ gr.Button(interactive=voice_ready), # ptt_transcribe_btn
2119
+ gr.Button(interactive=voice_ready), # ptt_chat_btn
2120
+ gr.Button(interactive=voice_ready), # vocal_chat_btn
2121
+ gr.Audio(interactive=voice_ready), # enroll_audio
2122
+ gr.Button(interactive=voice_ready), # enroll_btn
2123
+ gr.Button(interactive=voice_ready), # who_btn
2124
+ gr.Markdown("✅ Camera ready." if video_ready else "Camera disabled or not found.", visible=True),
2125
+ gr.Image(interactive=video_ready), # video_out
2126
+ )
2127
+ demo.load(wait_for_voice_features, None, [voice_status_md, ptt_audio_in, ptt_transcript, ptt_transcribe_btn, ptt_chat_btn, vocal_chat_btn, enroll_audio, enroll_btn, who_btn, camera_status_md, video_out])
2128
+
2129
+ def do_online_update():
2130
+ hive_instance = get_hive_instance() # type: ignore
2131
+ if hive_instance.lite_mode: return "Online features are disabled in Lite Mode." # type: ignore
2132
+ return "Added %s new summaries to curves." % (hive_instance.online_update().get("added",0))
2133
+
2134
+ connect_now.click(lambda: (NET.kick_async() or "Auto-connect started in background."), [], [wifi_status]) # type: ignore
2135
+ online_now.click(do_online_update, [], [online_status])
2136
+
2137
+ def on_login_or_mode_change(role, pick): # type: ignore
2138
+ is_adm = is_admin(pick, role)
2139
+ return gr.Tab(visible=is_adm)
2140
+
2141
+ # This function is now the core of the hands-free mode, using the new VADService.
2142
+ def process_vocal_chat_stream(stream, state, uid, role, mode, chatbot_history, request: gr.Request): # type: ignore
2143
+ now = time.time()
2144
+ hive_instance = get_hive_instance() # type: ignore
2145
+ if hive_instance.lite_mode or not hasattr(hive_instance, 'vad_service') or not hive_instance.vad_service: # type: ignore
2146
+ return None, state, chatbot_history, "VAD service not ready."
2147
+
2148
+ if stream is None:
2149
+ if state["active"] and now - state.get("last_interaction_time", now) > state["conversation_timeout"]:
2150
+ state["active"] = False
2151
+ return None, state, chatbot_history, "Status: Sleeping. Say wake word to start."
2152
+ return None, state, chatbot_history, state.get("status_text", "Status: Inactive")
2153
+
2154
+ if not state["active"]:
2155
+ return None, state, chatbot_history, "Status: Sleeping. Say wake word to start."
2156
+
2157
+ sampling_rate, audio_chunk = stream
2158
+
2159
+ # Use the VAD service to get speech segments
2160
+ for speech_segment in hive_instance.vad_service.process_stream(audio_chunk): # type: ignore
2161
+ state["last_interaction_time"] = now
2162
+ yield None, state, chatbot_history, "Status: Transcribing..."
2163
+
2164
+ with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile:
2165
+ sf.write(tmpfile.name, speech_segment, sampling_rate)
2166
+ asr_result = hive_instance.asr_service.transcribe(tmpfile.name, uid) # type: ignore
2167
+ os.remove(tmpfile.name)
2168
+
2169
+ user_text = asr_result["text"]
2170
+ if not user_text:
2171
+ continue
2172
+
2173
+ chatbot_history = (chatbot_history or []) + [[user_text, "..."]]
2174
+ yield None, state, chatbot_history, "Status: Thinking..."
2175
 
2176
+ eff_role = role if mode == "admin" else "user"
2177
+ final_message, intent = hive_instance._prepare_chat_input(user_text, "en", False, None) # type: ignore
2178
+ max_tokens = 512 if intent == "essay_review" else 256
2179
+ full_prompt = hive_instance.compiler.compile(final_message, [], intent=intent) # type: ignore
2180
+
2181
+ full_reply = ""
2182
+ sentence_buffer = ""
2183
+ for token in hive_instance.chat_stream(full_prompt, max_new_tokens=max_tokens, temperature=0.7): # type: ignore
2184
+ full_reply += token
2185
+ sentence_buffer += token
2186
+ chatbot_history[-1][1] = full_reply.strip()
2187
+
2188
+ match = re.search(r'([^.!?]+[.!?])', sentence_buffer)
2189
+ if match:
2190
+ sentence_to_speak = match.group(0).strip()
2191
+ sentence_buffer = sentence_buffer[len(sentence_to_speak):].lstrip()
2192
+ reply_audio_path = hive_instance.tts_service.synthesize(sentence_to_speak, uid) # type: ignore
2193
+ yield gr.Audio(value=reply_audio_path, autoplay=True), state, chatbot_history, "Status: Speaking..."
2194
+
2195
+ if sentence_buffer.strip():
2196
+ reply_audio_path = hive_instance.tts_service.synthesize(sentence_buffer, uid) # type: ignore
2197
+ yield gr.Audio(value=reply_audio_path, autoplay=True), state, chatbot_history, "Status: Speaking..."
2198
+
2199
+ state["last_interaction_time"] = time.time()
2200
+ yield None, state, chatbot_history, "Status: Active, listening for follow-up..."
2201
+
2202
+ def toggle_vocal_chat(state):
2203
+ state["active"] = not state["active"]
2204
+ status_text = "Status: Active, listening..." if state["active"] else "Status: Inactive"
2205
+ btn_text = "Stop Hands-Free Conversation" if state["active"] else "Start Hands-Free Conversation"
2206
+
2207
+ # Toggle visibility of the streaming mic
2208
+ mic_visibility = state["active"]
2209
+
2210
+ return state, status_text, gr.Button(value=btn_text), gr.Audio(visible=mic_visibility, streaming=True)
2211
+
2212
+ vocal_chat_btn.click(toggle_vocal_chat, [vocal_chat_state], [vocal_chat_state, vocal_chat_status, vocal_chat_btn, vocal_mic])
2213
+
+    # --- Wake Word Detection Logic ---
+    porcupine_instance = None
+    if _HAVE_PVP and CFG.get("PVPORCUPINE_ACCESS_KEY"):  # type: ignore
+        keyword_paths: List[str] = []
+        keywords = [k.strip() for k in CFG["HIVE_WAKE_WORDS"].split(',') if k.strip()]  # type: ignore
+
+        for keyword in keywords:
+            custom_path = os.path.join(CFG["HIVE_HOME"], "keywords", f"{keyword}_{_os_name()}.ppn")
+            if os.path.exists(custom_path):
+                keyword_paths.append(custom_path)
+            elif keyword in pvporcupine.KEYWORD_PATHS:  # type: ignore
+                # Resolve a builtin keyword name to its bundled .ppn file;
+                # keyword_paths must contain file paths, not bare keyword names.
+                keyword_paths.append(pvporcupine.KEYWORD_PATHS[keyword])  # type: ignore
+
+        if not keyword_paths:
+            keyword_paths = [pvporcupine.KEYWORD_PATHS['bumblebee']]  # type: ignore
+
+        try:
+            porcupine_instance = pvporcupine.create(  # type: ignore
+                access_key=CFG["PVPORCUPINE_ACCESS_KEY"],  # type: ignore
+                keyword_paths=keyword_paths
+            )
+            print(f"[WakeWord] Listening for: {keywords}")
+        except Exception as e:
+            print(f"[WakeWord] Error initializing Porcupine: {e}. Wake word will be disabled.")
+            porcupine_instance = None
+
+    # Auto-start wake word listener on Pi. platform.machine() only reports the CPU
+    # architecture, so check the node (host) name, which defaults to "raspberrypi".
+    is_pi = 'raspberrypi' in platform.uname().node.lower()
+    if is_pi and porcupine_instance:
+        print("[WakeWord] Raspberry Pi detected. Wake word listener is always on.")
+
+    def process_wake_word_stream(stream, ww_state, vc_state, request: gr.Request):  # type: ignore
+        if not porcupine_instance or stream is None or vc_state.get("active", False):
+            return ww_state, vc_state, "Status: Inactive", gr.Button(value="Start Hands-Free Conversation")
+
+        sampling_rate, audio_chunk = stream
+        # Porcupine expects 16-bit PCM; only rescale if the chunk is still float.
+        if audio_chunk.dtype != np.int16:
+            audio_chunk = (audio_chunk * 32767).astype(np.int16)
+        ww_state["buffer"] += audio_chunk.tobytes()
+
+        frame_length = porcupine_instance.frame_length  # type: ignore
+        while len(ww_state["buffer"]) >= frame_length * 2:  # 2 bytes per int16 sample
+            frame_bytes = ww_state["buffer"][:frame_length * 2]
+            ww_state["buffer"] = ww_state["buffer"][frame_length * 2:]
+            frame = struct.unpack_from("h" * frame_length, frame_bytes)
+
+            keyword_index = porcupine_instance.process(frame)  # type: ignore
+            if keyword_index >= 0:
+                print("[WakeWord] Detected wake word! Activating hot mic.")
+                vc_state["active"] = True
+                vc_state["last_interaction_time"] = time.time()  # start conversation timer
+                status_text = "Status: Wake word detected! Listening for command..."
+                return ww_state, vc_state, status_text, gr.Button(value="Stop Hands-Free Conversation")
+        return ww_state, vc_state, "Status: Inactive", gr.Button(value="Start Hands-Free Conversation")
+
+    if porcupine_instance:
+        wake_word_mic.stream(process_wake_word_stream, [wake_word_mic, wake_word_state, vocal_chat_state], [wake_word_state, vocal_chat_state, vocal_chat_status, vocal_chat_btn])
+
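# Illustrative sketch (not part of the commit) of the byte-buffer framing above:
# float samples are scaled to 16-bit PCM, then sliced into fixed-length frames.
# The 512-sample frame_length and the synthetic audio here are assumptions for demo.
import struct
import numpy as np

frame_length = 512                      # Porcupine frames are a fixed sample count
audio = np.random.uniform(-1.0, 1.0, 1300).astype(np.float32)
buffer = (audio * 32767).astype(np.int16).tobytes()

frames = []
while len(buffer) >= frame_length * 2:  # 2 bytes per int16 sample
    frames.append(struct.unpack_from("h" * frame_length, buffer[:frame_length * 2]))
    buffer = buffer[frame_length * 2:]  # carry the remainder into the next chunk
print(len(frames), "full frames;", len(buffer) // 2, "samples carried over")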
+    def is_admin(mode, role): return (mode == "admin") and (role in ("admin_general", "admin_super", "owner"))
+
+    def do_add(mode, role, caller, nm, rl, pw):  # type: ignore
+        if not is_admin(mode, role): return "Switch to Admin mode to use this."
+        d = _load_users(); cu, _ = _find_user(d, caller or "")
+        if not cu: return "Login first as admin."
+        if rl not in PERMS.get(cu["role"], {}).get("can_add", []): return f"{cu['role']} cannot add {rl}."
+        uid = f"{rl}:{int(time.time())}"
+        entry = {"id": uid, "name": nm, "role": rl, "pass": pw if rl != 'user' else "", "prefs": {"activation_names": [CFG["AGENT_NAME"]], "language": "en"}}  # type: ignore
+        if rl == "owner":
+            # Remove the outgoing owner from every group before replacing the record.
+            for group in ["admins_super", "admins_general", "users"]:
+                d[group] = [u for u in d.get(group, []) if u.get("id") != d.get("owner", {}).get("id")]
+            d["owner"] = entry
+        elif rl == "admin_super": d["admins_super"].append(entry)
+        elif rl == "admin_general": d["admins_general"].append(entry)
+        else: d["users"].append(entry)
+        _save_json(USERS_DB, d); return f"Added {rl}: {nm}"
+    add_btn.click(do_add, [mode_state, role_state, uid_state, add_name, add_role, add_pass], [out_add])
+
+    def do_rename(mode, role, caller, tgt, nm):  # type: ignore
+        if not is_admin(mode, role): return "Switch to Admin mode to use this."
+        d = _load_users(); u, _ = _find_user(d, tgt or "")
+        if not u: return "Target not found."
+        cu, _ = _find_user(d, caller or "")
+        if not cu: return "Login first."
+        if u.get("role") in PERMS.get(cu.get("role"), {}).get("can_edit_profile_of", []):
+            u["name"] = nm; _save_json(USERS_DB, d); return "Renamed."
+        return "Not allowed."
+    rename_btn.click(do_rename, [mode_state, role_state, uid_state, target, new_name], [out])
+
+    def do_pass(mode, role, caller, tgt, pw):  # type: ignore
+        if not is_admin(mode, role): return "Switch to Admin mode to use this."
+        d = _load_users(); u, _ = _find_user(d, tgt or "")
+        if not u: return "Target not found."
+        cu, _ = _find_user(d, caller or "")
+        if not cu: return "Login first."
+        if u.get("role") in PERMS.get(cu.get("role"), {}).get("can_edit_profile_of", []):
+            u["pass"] = pw; _save_json(USERS_DB, d); return "Password changed."
+        return "Not allowed."
+    pass_btn.click(do_pass, [mode_state, role_state, uid_state, target, new_pass], [out])
+
+    def do_role(mode, role, caller, tgt, rl):  # type: ignore
+        if not is_admin(mode, role): return "Switch to Admin mode to use this."
+        d = _load_users(); u, _ = _find_user(d, tgt or "")
+        if not u: return "Target not found."
+        cu, _ = _find_user(d, caller or "")
+        if not cu: return "Login first."
+        allowed_new = {"owner": ["owner", "admin_super", "admin_general", "user"],
+                       "admin_super": ["admin_super", "admin_general", "user"],
+                       "admin_general": ["admin_general", "user"]}.get(cu.get("role"), [])
+        if u.get("role") not in PERMS.get(cu.get("role"), {}).get("can_edit_role_of", []) or rl not in allowed_new:
+            return f"Not allowed to set {rl}."
+        # Remove the target from every group, then re-file it under the new role.
+        for grp in ["admins_super", "admins_general", "users"]:
+            if grp in d:
+                d[grp] = [user for user in d[grp] if user.get("id") != u.get("id")]
+        if rl == "owner": d["owner"] = u; u["role"] = "owner"
+        elif rl == "admin_super": d["admins_super"].append(u); u["role"] = "admin_super"
+        elif rl == "admin_general": d["admins_general"].append(u); u["role"] = "admin_general"
+        else: d["users"].append(u); u["role"] = "user"
+        _save_json(USERS_DB, d); return f"Role set to {rl}."
+    role_btn.click(do_role, [mode_state, role_state, uid_state, target, new_role], [out])
+
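# For reference, the shape PERMS is assumed to have, inferred only from the lookups
# in do_add/do_rename/do_pass/do_role above; the real table lives earlier in app.py
# and its exact contents may differ.
PERMS_EXAMPLE = {
    "owner": {
        "can_add": ["owner", "admin_super", "admin_general", "user"],
        "can_edit_profile_of": ["owner", "admin_super", "admin_general", "user"],
        "can_edit_role_of": ["owner", "admin_super", "admin_general", "user"],
    },
    "admin_general": {
        "can_add": ["user"],
        "can_edit_profile_of": ["user"],
        "can_edit_role_of": ["user"],
    },
}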
+    def run_ingest_background(hive_instance):  # type: ignore
+        """Triggers the background ingestion process."""
+        if hive_instance.lite_mode: return "Ingestion is disabled in Lite Mode."
+        def ingest_task():  # type: ignore
+            staged_ingest_chain_if_enabled(str(hive_instance.config["CURVE_DIR"]))
+        threading.Thread(target=ingest_task, daemon=True).start()
+        return "Background ingestion process started. See logs for details."
+    ingest_now_btn.click(lambda: run_ingest_background(get_hive_instance()), [], [ingest_status])
+
+    # compress_memory could misbehave when get_hive_instance() returned a lite
+    # instance, so it is guarded with an explicit check.
+    def compress_memory(h):  # type: ignore
+        if h.lite_mode or not hasattr(h, 'store'):
+            return "Memory compression is not available until the Full Hive Core is ready."
+        ok, msg = _archive_memory(str(h.store.dir))
+        return msg
+    mem_compress_btn.click(lambda: compress_memory(get_hive_instance()), [], [compress_status])
+
+    def do_hotpatch(patch_json):  # type: ignore
+        """Applies a runtime hotpatch from the admin console."""
+        try: patch = json.loads(patch_json)
+        except Exception as e: return f"Invalid JSON: {e}"
+        # Hotpatching is a sensitive operation and must be gated on the caller's
+        # role; until that check is wired in, the patch is validated but not applied.
+        return "Hotpatching is an admin-only feature."
+    hotpatch_apply.click(do_hotpatch, [hotpatch_patch], [hotpatch_status])
+
+    # This state will hold the session hash for guest users.
+    session_id_state = gr.State(None)
+    _last: Dict[str, Any] = {"id": None, "obj": None}  # most recent proposal (id + object)
+
+    # These handlers are only reachable from the full UI and are additionally
+    # guarded against the lite instance.
+    def do_apply(role, mode):  # type: ignore
+        hive_instance = get_hive_instance()  # type: ignore
+        if hive_instance.lite_mode or not hasattr(hive_instance, 'changes'): return "Change management is disabled in Lite Mode."
+        if role not in ("admin_super", "owner") or mode != "admin": return "Only admin_super or owner may apply."
+        if not _last["obj"]: return "No proposal loaded."  # type: ignore
+        res = hive_instance.changes.test_and_compare(str(_last["id"]), _last["obj"])  # type: ignore
+        if not res.get("ok"): return f"Test failed: {res.get('reason', 'unknown')}"
+        if _last["obj"].kind == "code" and role != "owner" and not CFG["OPT_AUTO_APPLY"]: return "Awaiting Owner approval for code changes."  # type: ignore
+        ok, msg = hive_instance.changes.apply(res); return msg if ok else f"Apply failed: {msg}"  # type: ignore
+
+    def do_propose(kind, name, ver, reason, patch):  # type: ignore
+        hive_instance = get_hive_instance()  # type: ignore
+        if hive_instance.lite_mode or not hasattr(hive_instance, 'changes'): return {"status": "Error", "reason": "Proposals disabled in Lite Mode."}
+        cp = ChangeProposal(kind=kind, name=name or "", version=ver or "", reason=reason or "", patch_text=patch or "")
+        pid = hive_instance.changes.propose(cp); _last["id"] = pid; _last["obj"] = cp  # type: ignore
+        return {"status": "Proposed", "kind": kind, "name": name or '(code patch)', "id": pid}  # type: ignore
+
+    def do_test():  # type: ignore
+        if not _last["obj"]: return "No proposal in memory. Submit one first."  # type: ignore
+        hive_instance = get_hive_instance()
+        if hive_instance.lite_mode or not hasattr(hive_instance, 'changes'): return {"status": "Error", "reason": "Testing disabled in Lite Mode."}
+        return hive_instance.changes.test_and_compare(str(_last["id"]), _last["obj"])  # type: ignore
+    propose_btn.click(do_propose, [prop_kind, prop_name, prop_ver, prop_reason, prop_patch], [opt_out])
+    test_btn.click(do_test, [], [opt_out])
+    apply_btn.click(do_apply, [role_state, mode_state], [opt_out])
+
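# A minimal sketch of the propose -> test -> apply lifecycle the three handlers
# above drive; assumes a full (non-lite) instance, omits the role checks, and the
# ChangeProposal field values here are illustrative only.
hive = get_hive_instance()
cp = ChangeProposal(kind="config", name="ctx_tokens", version="1",
                    reason="tune context window", patch_text='{"CTX_TOKENS": 4096}')
pid = hive.changes.propose(cp)                      # register, get a proposal id
res = hive.changes.test_and_compare(str(pid), cp)   # sandbox-test the proposal
if res.get("ok"):
    ok, msg = hive.changes.apply(res)               # promote only a passing result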
+    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT")) if os.environ.get("PORT") else None, share=False); return demo
 
 class Bootstrap:
     """Handles the entire application startup sequence cleanly."""

         self.voice_ready = threading.Event()
         self.env: Optional[Dict] = None
         self.app: Optional[gr.Blocks] = None
+        self.init_status: Dict[str, str] = {}
         self.ui_thread: Optional[threading.Thread] = None
 
     def initialize_persistent_storage(self, base_path: str):

         if not (root / "system" / "config.json").exists():
             _save_json(root / "system" / "config.json", {"note": "Default config created by Bootstrap."})
 
+    def _run_task(self, name: str, target_func, *args):
+        """Wrapper to run an initialization task, logging its status."""
+        print(f"[Bootstrap] Starting task: {name}...")
+        self.init_status[name] = "running"
+        try:
+            target_func(*args)
+            self.init_status[name] = "success"
+            print(f"[Bootstrap] Task '{name}' completed successfully.")
+        except Exception as e:
+            self.init_status[name] = f"failed: {e}"
+            print(f"[ERROR] Task '{name}' failed: {e}")
+
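# Illustrative only: because _run_task records per-task status in init_status, a
# health hook can summarize startup progress; `init_summary` is a hypothetical
# helper, not a name from this app.
def init_summary(bootstrap: "Bootstrap") -> str:
    return ", ".join(f"{name}: {status}" for name, status in bootstrap.init_status.items())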
     def run(self):
         """Executes the full startup sequence."""
         print("[Bootstrap] Starting Hive System...")

         print("[Bootstrap] Low memory detected, enabling ultra-constrained mode.")
         self.config["CTX_TOKENS"] = min(self.config.get("CTX_TOKENS", 2048), 1024)
 
+        # --- Lite Core Initialization (Fast Path) ---
+        self._run_task("lite_core_init", self._init_lite_core)
 
         def full_init_task():
+            """Initializes the full Hive instance."""
+            Hive.bootstrap_instance = self
+            self.hive_instance = Hive(lite=False, caps=self.caps)
+            self.hive_ready.set()
 
         def voice_init_task():
             """Initializes voice models in a separate thread."""
+            # ASR and TTS are independent, slow loads, so warm them up in parallel.
+            with ThreadPoolExecutor(max_workers=2) as executor:
+                executor.submit(get_asr)
+                executor.submit(get_tts, CFG["TTS_LANG"])
             self.voice_ready.set()
 
+        # --- Launch Background Initialization Tasks ---
+        tasks = {
+            "memory_setup": self.setup_memory,
+            "voice_init": voice_init_task,
+            "full_core_init": full_init_task,
+        }
+        for name, target in tasks.items():
+            threading.Thread(target=self._run_task, args=(name, target), daemon=True).start()
+
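# Illustrative only: request handlers can gate on the readiness events these
# background tasks set, falling back to the lite core while the full core loads;
# `pick_core` is a hypothetical helper, not a name from this app.
def pick_core(bootstrap: "Bootstrap"):
    if bootstrap.hive_ready.is_set():
        return bootstrap.hive_instance       # full core, once full_init_task finishes
    return bootstrap.hive_lite_instance      # lite core is initialized first (fast path)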
         import signal
         signal.signal(signal.SIGINT, self.graceful_shutdown)
         signal.signal(signal.SIGTERM, self.graceful_shutdown)
+
+        # Launch the UI in the main thread (blocking call).
+        self._run_task("ui_launch", self.launch)
+
+    def _init_lite_core(self):
+        """Initializes the fast, responsive lite core."""
+        Hive.bootstrap_instance = self
+        self.hive_lite_instance = Hive(lite=True, caps=self.caps)
+        self.video_service = VideoService()
+        self.video_service.start()
 
     def soft_restart(self):
         """Performs a hot-reload of the application logic without restarting the process."""

         self.hive_ready.clear()
         if self.hive_instance:
             self.hive_instance.module_manager.stop_all()
+        if self.app and hasattr(self.app, 'close'):
             self.app.close()
+        if self.ui_thread:
             self.ui_thread.join(timeout=5.0)
2495