ryomo commited on
Commit
af6d7ef
·
1 Parent(s): 1c2654e

feat: enable chat functionality through environment variable

Browse files
.env.sample CHANGED
@@ -1,3 +1,4 @@
 
1
  LOGGING_LEVEL=DEBUG
2
  ENABLE_GRADIO_DEPRECATION_WARNING=false
3
  USE_MODAL=false
 
1
+ ENABLE_CHAT=false
2
  LOGGING_LEVEL=DEBUG
3
  ENABLE_GRADIO_DEPRECATION_WARNING=false
4
  USE_MODAL=false
README.md CHANGED
@@ -34,7 +34,7 @@ uv run gradio app.py
34
 
35
  The Chat Tab is hidden by default, because it is not fully implemented yet.
36
 
37
- To enable it, change `with gr.TabItem("Chat", visible=False):` to `with gr.TabItem("Chat", visible=True):` in `app.py`.
38
 
39
  ### Modal
40
 
 
34
 
35
  The Chat Tab is hidden by default, because it is not fully implemented yet.
36
 
37
+ To enable it, set `ENABLE_CHAT=true` in your `.env` file.
38
 
39
  ### Modal
40
 
app.py CHANGED
@@ -22,7 +22,11 @@ from unpredictable_lord.mcp_server.mcp_server import (
22
  init_game,
23
  list_available_advice,
24
  )
25
- from unpredictable_lord.settings import ENABLE_GRADIO_DEPRECATION_WARNING, LOGGING_LEVEL
 
 
 
 
26
  from unpredictable_lord.utils import get_tool_prefix, update_guide_with_tool_prefix
27
 
28
  # Configure logging level from environment variable
@@ -66,9 +70,11 @@ with gr.Blocks(title="Unpredictable Lord") as demo:
66
  chat_session_id = gr.State(value=None)
67
  chat_system_instructions = gr.State(value="")
68
 
69
- with gr.Tabs(selected="mcp_server"):
 
 
70
  # Chat Tab (hidden - not ready for release)
71
- with gr.TabItem("Chat", visible=False):
72
  with gr.Row():
73
  # Left column: Chat interface
74
  with gr.Column(scale=3):
 
22
  init_game,
23
  list_available_advice,
24
  )
25
+ from unpredictable_lord.settings import (
26
+ ENABLE_CHAT,
27
+ ENABLE_GRADIO_DEPRECATION_WARNING,
28
+ LOGGING_LEVEL,
29
+ )
30
  from unpredictable_lord.utils import get_tool_prefix, update_guide_with_tool_prefix
31
 
32
  # Configure logging level from environment variable
 
70
  chat_session_id = gr.State(value=None)
71
  chat_system_instructions = gr.State(value="")
72
 
73
+ SELECTED_TAB = "chat" if ENABLE_CHAT else "mcp_server"
74
+
75
+ with gr.Tabs(selected=SELECTED_TAB):
76
  # Chat Tab (hidden unless ENABLE_CHAT=true — not fully implemented yet)
77
+ with gr.TabItem("Chat", id="chat", visible=ENABLE_CHAT):
78
  with gr.Row():
79
  # Left column: Chat interface
80
  with gr.Column(scale=3):
src/unpredictable_lord/chat/chat.py CHANGED
@@ -15,7 +15,7 @@ from unpredictable_lord.chat.chat_tools import (
15
  execute_tool_calls,
16
  extract_tool_calls,
17
  )
18
- from unpredictable_lord.settings import USE_MODAL
19
 
20
  logger = logging.getLogger(__name__)
21
 
@@ -24,10 +24,14 @@ logger = logging.getLogger(__name__)
24
  # Higher values allow for complex multi-step reasoning if needed
25
  MAX_AGENT_ITERATIONS = 3
26
 
 
 
27
 
28
- if USE_MODAL:
29
  import modal
30
 
 
 
31
  APP_NAME = "unpredictable-lord"
32
  _generate_stream = modal.Function.from_name(APP_NAME, "generate_stream")
33
 
@@ -38,6 +42,8 @@ if USE_MODAL:
38
  else:
39
  from unpredictable_lord.chat.llm_zerogpu import generate_stream as _generate_stream
40
 
 
 
41
  async def generate_stream(input_tokens):
42
  logger.info("Calling ZeroGPU LLM generate_stream (sync wrapper)")
43
  # Note: This blocks the event loop, but is acceptable for ZeroGPU/Spaces
 
15
  execute_tool_calls,
16
  extract_tool_calls,
17
  )
18
+ from unpredictable_lord.settings import ENABLE_CHAT, USE_MODAL
19
 
20
  logger = logging.getLogger(__name__)
21
 
 
24
  # Higher values allow for complex multi-step reasoning if needed
25
  MAX_AGENT_ITERATIONS = 3
26
 
27
+ if not ENABLE_CHAT:
28
+ logger.info("Chat functionality is disabled.")
29
 
30
+ elif USE_MODAL:
31
  import modal
32
 
33
+ logger.info("Using Modal LLM backend for chat")
34
+
35
  APP_NAME = "unpredictable-lord"
36
  _generate_stream = modal.Function.from_name(APP_NAME, "generate_stream")
37
 
 
42
  else:
43
  from unpredictable_lord.chat.llm_zerogpu import generate_stream as _generate_stream
44
 
45
+ logger.info("Using ZeroGPU LLM backend for chat")
46
+
47
  async def generate_stream(input_tokens):
48
  logger.info("Calling ZeroGPU LLM generate_stream (sync wrapper)")
49
  # Note: This blocks the event loop, but is acceptable for ZeroGPU/Spaces
src/unpredictable_lord/settings.py CHANGED
@@ -4,6 +4,7 @@ from dotenv import load_dotenv
4
 
5
  load_dotenv()
6
 
 
7
  LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO").upper()
8
  ENABLE_GRADIO_DEPRECATION_WARNING = (
9
  os.getenv("ENABLE_GRADIO_DEPRECATION_WARNING", "false").lower() == "true"
 
4
 
5
  load_dotenv()
6
 
7
+ ENABLE_CHAT = os.getenv("ENABLE_CHAT", "false").lower() == "true"
8
  LOGGING_LEVEL = os.getenv("LOGGING_LEVEL", "INFO").upper()
9
  ENABLE_GRADIO_DEPRECATION_WARNING = (
10
  os.getenv("ENABLE_GRADIO_DEPRECATION_WARNING", "false").lower() == "true"