GitHub Action committed on
Commit
439ab17
·
1 Parent(s): 63e4846

Sync ling-space changes from GitHub commit d5d4701

Browse files
app.py CHANGED
@@ -10,9 +10,9 @@ from tab_test import run_model_handler_test, run_clear_chat_test
10
 
11
  def get_history_df(history):
12
  if not history:
13
- return pd.DataFrame({'ID': [], 'Conversation': []})
14
  df = pd.DataFrame(history)
15
- return df[['id', 'title']].rename(columns={'id': 'ID', 'title': 'Conversation'})
16
 
17
  def on_app_load(history, conv_id):
18
  """
@@ -24,7 +24,7 @@ def on_app_load(history, conv_id):
24
  if not history:
25
  # First time ever loading, create a new chat
26
  conv_id = str(uuid.uuid4())
27
- new_convo = { "id": conv_id, "title": "New Conversation", "messages": [], "timestamp": datetime.now().isoformat() }
28
  history = [new_convo]
29
  return conv_id, history, gr.update(value=get_history_df(history)), []
30
 
@@ -51,6 +51,13 @@ CSS = """
51
  footer {
52
  display: none !important;
53
  }
 
 
 
 
 
 
 
54
  """
55
 
56
  if __name__ == "__main__":
 
10
 
11
def get_history_df(history):
    """Project the conversation history into a two-column display table.

    Returns a DataFrame with columns 'ID' and '对话' (conversation title),
    one row per stored conversation; empty/missing history yields an
    empty frame with the same columns.
    """
    if not history:
        return pd.DataFrame({'ID': [], '对话': []})
    table = pd.DataFrame(history)[['id', 'title']]
    return table.rename(columns={'id': 'ID', 'title': '对话'})
16
 
17
  def on_app_load(history, conv_id):
18
  """
 
24
  if not history:
25
  # First time ever loading, create a new chat
26
  conv_id = str(uuid.uuid4())
27
+ new_convo = { "id": conv_id, "title": "(新对话)", "messages": [], "timestamp": datetime.now().isoformat() }
28
  history = [new_convo]
29
  return conv_id, history, gr.update(value=get_history_df(history)), []
30
 
 
51
  footer {
52
  display: none !important;
53
  }
54
+
55
+ /* Disable transition and animation for no-transition class */
56
+ .no-transition, .no-transition * {
57
+ transition: none !important;
58
+ animation: none !important;
59
+ animation-play-state: paused !important;
60
+ }
61
  """
62
 
63
  if __name__ == "__main__":
config.py CHANGED
@@ -42,42 +42,42 @@ CHAT_MODEL_SPECS = {
42
  "provider": "openai_compatible",
43
  "model_id": "inclusionai/ling-mini-2.0",
44
  "display_name": "🦉 Ling-mini-2.0",
45
- "description": "A lightweight conversational model optimized for efficient operation on consumer-grade hardware, ideal for mobile or localized deployment scenarios.",
46
  "url": "https://huggingface.co/inclusionai"
47
  },
48
  LING_1T: {
49
  "provider": "openai_compatible",
50
  "model_id": "inclusionai/ling-1t",
51
  "display_name": "🦉 Ling-1T",
52
- "description": "A trillion-parameter large language model designed for complex natural language understanding and generation tasks that require extreme performance and high fluency.",
53
  "url": "https://huggingface.co/inclusionai"
54
  },
55
  LING_FLASH_2_0: {
56
  "provider": "openai_compatible",
57
  "model_id": "inclusionai/ling-flash-2.0",
58
  "display_name": "🦉 Ling-flash-2.0",
59
- "description": "A high-performance billion-parameter model optimized for scenarios requiring high-speed response and complex instruction following.",
60
  "url": "https://huggingface.co/inclusionai"
61
  },
62
  RING_1T: {
63
  "provider": "openai_compatible",
64
  "model_id": "inclusionai/ring-1t",
65
  "display_name": "💍️ Ring-1T",
66
- "description": "A brand-new trillion-parameter reasoning model with powerful code generation and tool use capabilities.",
67
  "url": "https://huggingface.co/inclusionai"
68
  },
69
  RING_FLASH_2_0: {
70
  "provider": "openai_compatible",
71
  "model_id": "inclusionai/ring-flash-2.0",
72
  "display_name": "💍️ Ring-flash-2.0",
73
- "description": "A billion-parameter reasoning model that strikes a good balance between performance and cost, suitable for general-purpose tasks that require step-by-step thinking or code generation.",
74
  "url": "https://huggingface.co/inclusionai"
75
  },
76
  RING_MINI_2_0: {
77
  "provider": "openai_compatible",
78
  "model_id": "inclusionai/ring-mini-2.0",
79
  "display_name": "💍️ Ring-mini-2.0",
80
- "description": "A quantized and extremely efficient reasoning model designed for resource-constrained environments with strict speed and efficiency requirements (such as edge computing).",
81
  "url": "https://huggingface.co/inclusionai"
82
  }
83
  }
 
42
  "provider": "openai_compatible",
43
  "model_id": "inclusionai/ling-mini-2.0",
44
  "display_name": "🦉 Ling-mini-2.0",
45
+ "description": "轻量级对话模型,专为消费级硬件的高效运行而优化,是移动端或本地化部署场景的理想选择。",
46
  "url": "https://huggingface.co/inclusionai"
47
  },
48
  LING_1T: {
49
  "provider": "openai_compatible",
50
  "model_id": "inclusionai/ling-1t",
51
  "display_name": "🦉 Ling-1T",
52
+ "description": "万亿参数的大型语言模型,专为需要极致性能和高流畅度的复杂自然语言理解与生成任务而设计。",
53
  "url": "https://huggingface.co/inclusionai"
54
  },
55
  LING_FLASH_2_0: {
56
  "provider": "openai_compatible",
57
  "model_id": "inclusionai/ling-flash-2.0",
58
  "display_name": "🦉 Ling-flash-2.0",
59
+ "description": "高性能十亿参数模型,针对需要高速响应和复杂指令遵循的场景进行了优化。",
60
  "url": "https://huggingface.co/inclusionai"
61
  },
62
  RING_1T: {
63
  "provider": "openai_compatible",
64
  "model_id": "inclusionai/ring-1t",
65
  "display_name": "💍️ Ring-1T",
66
+ "description": "全新的万亿参数推理模型,具备强大的代码生成和工具使用能力。",
67
  "url": "https://huggingface.co/inclusionai"
68
  },
69
  RING_FLASH_2_0: {
70
  "provider": "openai_compatible",
71
  "model_id": "inclusionai/ring-flash-2.0",
72
  "display_name": "💍️ Ring-flash-2.0",
73
+ "description": "十亿参数推理模型,在性能与成本之间取得了良好的平衡,适用于需要逐步思考或代码生成的通用任务。",
74
  "url": "https://huggingface.co/inclusionai"
75
  },
76
  RING_MINI_2_0: {
77
  "provider": "openai_compatible",
78
  "model_id": "inclusionai/ring-mini-2.0",
79
  "display_name": "💍️ Ring-mini-2.0",
80
+ "description": "一款专为资源受限环境设计的量化且极其高效的推理模型,满足严格的速度和效率要求(如边缘计算)。",
81
  "url": "https://huggingface.co/inclusionai"
82
  }
83
  }
smart_writer_kit/agent_common_utils.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+
3
+ def format_df_to_string(df: pd.DataFrame, title: str) -> str:
4
+ """Formats a pandas DataFrame into a markdown-like string for the prompt."""
5
+ if df is None or df.empty:
6
+ return ""
7
+
8
+ header = f"### {title}\n"
9
+ rows = []
10
+ for _, row in df.iterrows():
11
+ if 'Done' in df.columns and 'Task' in df.columns:
12
+ status = "[x]" if row['Done'] else "[ ]"
13
+ rows.append(f"- {status} {row['Task']}")
14
+ elif 'Term' in df.columns and 'Description' in df.columns:
15
+ rows.append(f"- **{row['Term']}**: {row['Description']}")
16
+
17
+ return header + "\n".join(rows) + "\n\n"
smart_writer_kit/agent_for_inspiration_expansion.py CHANGED
@@ -2,26 +2,12 @@ import gradio as gr
2
  import pandas as pd
3
  from model_handler import ModelHandler
4
  from config import LING_1T
5
-
6
- def _format_df_to_string(df: pd.DataFrame, title: str) -> str:
7
- """Formats a pandas DataFrame into a markdown-like string for the prompt."""
8
- if df is None or df.empty:
9
- return ""
10
-
11
- header = f"### {title}\n"
12
- rows = []
13
- for _, row in df.iterrows():
14
- if 'Done' in df.columns and 'Task' in df.columns:
15
- status = "[x]" if row['Done'] else "[ ]"
16
- rows.append(f"- {status} {row['Task']}")
17
- elif 'Term' in df.columns and 'Description' in df.columns:
18
- rows.append(f"- **{row['Term']}**: {row['Description']}")
19
-
20
- return header + "\n".join(rows) + "\n\n"
21
 
22
  def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
23
  """
24
  Agent for fetching inspiration options using a real LLM.
 
25
  """
26
  print("\n[Agent][fetch_inspiration_agent] === 推理类型:灵感扩写 ===")
27
  print("【发出的完整上下文】")
@@ -35,9 +21,9 @@ def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df:
35
  try:
36
  # 1. Format context from UI inputs
37
  style_context = f"### 整体章程\n{style}\n\n"
38
- kb_context = _format_df_to_string(kb_df, "知识库")
39
- short_outline_context = _format_df_to_string(short_outline_df, "当前章节大纲")
40
- long_outline_context = _format_df_to_string(long_outline_df, "故事总纲")
41
 
42
  # 2. Build System Prompt
43
  system_prompt = (
@@ -82,50 +68,6 @@ def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df:
82
  error_message = f"获取灵感时出错: {e}"
83
  return gr.update(visible=True), error_message, "请检查日志", "请检查日志"
84
 
85
-
86
- def fetch_paragraph_continuation_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
87
- """
88
- Agent for fetching a single paragraph continuation (Ribbon UI version).
89
- """
90
- print("\n[Agent][fetch_paragraph_continuation_agent] === 推理类型:整段续写 (Single) ===")
91
- try:
92
- # 1. Format context
93
- style_context = f"### 整体章程\n{style}\n\n"
94
- kb_context = _format_df_to_string(kb_df, "知识库")
95
- short_outline_context = _format_df_to_string(short_outline_df, "当前章节大纲")
96
- long_outline_context = _format_df_to_string(long_outline_df, "故事总纲")
97
-
98
- # 2. Build System Prompt
99
- system_prompt = (
100
- "你是一个富有创意的长篇小说家。请根据提供的背景设定和当前文本,自然地续写一段高质量的剧情。\n"
101
- "请直接输出续写内容,不要包含任何解释、前缀或后缀。"
102
- )
103
-
104
- # 3. Build User Prompt
105
- full_context = style_context + kb_context + long_outline_context + short_outline_context
106
- user_prompt = (
107
- f"### 背景设定与大纲\n{full_context}\n"
108
- f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
109
- f"### 用户指令\n{prompt if prompt else '请基于当前内容,自然地延续剧情,写一个完整的段落。'}"
110
- )
111
-
112
- # 4. Call LLM
113
- model_handler = ModelHandler()
114
- response_generator = model_handler.generate_code(
115
- system_prompt=system_prompt,
116
- user_prompt=user_prompt,
117
- model_choice=LING_1T
118
- )
119
-
120
- full_response = "".join(chunk for chunk in response_generator)
121
- return full_response.strip()
122
-
123
- except Exception as e:
124
- print(f"[Agent] Error fetching paragraph continuation: {e}")
125
- return f"获取续写时出错: {e}"
126
-
127
-
128
-
129
  def apply_inspiration_agent(current_text: str, inspiration_text: str):
130
  """
131
  Agent for applying selected inspiration to the editor.
@@ -141,4 +83,4 @@ def apply_inspiration_agent(current_text: str, inspiration_text: str):
141
  print("【收到的完整上下文】")
142
  print("new_text:", repr(new_text))
143
  # Return a tuple that unpacks into the outputs for the Gradio event handler
144
- return new_text, gr.update(visible=False), ""
 
2
  import pandas as pd
3
  from model_handler import ModelHandler
4
  from config import LING_1T
5
+ from .agent_common_utils import format_df_to_string
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
8
  """
9
  Agent for fetching inspiration options using a real LLM.
10
+ (Original 3-option expansion logic)
11
  """
12
  print("\n[Agent][fetch_inspiration_agent] === 推理类型:灵感扩写 ===")
13
  print("【发出的完整上下文】")
 
21
  try:
22
  # 1. Format context from UI inputs
23
  style_context = f"### 整体章程\n{style}\n\n"
24
+ kb_context = format_df_to_string(kb_df, "知识库")
25
+ short_outline_context = format_df_to_string(short_outline_df, "当前章节大纲")
26
+ long_outline_context = format_df_to_string(long_outline_df, "故事总纲")
27
 
28
  # 2. Build System Prompt
29
  system_prompt = (
 
68
  error_message = f"获取灵感时出错: {e}"
69
  return gr.update(visible=True), error_message, "请检查日志", "请检查日志"
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  def apply_inspiration_agent(current_text: str, inspiration_text: str):
72
  """
73
  Agent for applying selected inspiration to the editor.
 
83
  print("【收到的完整上下文】")
84
  print("new_text:", repr(new_text))
85
  # Return a tuple that unpacks into the outputs for the Gradio event handler
86
+ return new_text, gr.update(visible=False), ""
smart_writer_kit/agent_for_paragraph_continuation.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
from model_handler import ModelHandler
from config import LING_1T
from .agent_common_utils import format_df_to_string

def fetch_paragraph_continuation_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """
    Agent for fetching a single paragraph continuation (Ribbon UI version).

    Assembles the writing charter, knowledge base and outlines into one
    prompt, asks the LLM for a single continuation paragraph, and returns
    the stripped text. On failure an error-message string is returned
    instead of raising.
    """
    print("\n[Agent][fetch_paragraph_continuation_agent] === 推理类型:整段续写 (Single) ===")
    try:
        # Background context in fixed order:
        # charter -> knowledge base -> full story outline -> chapter outline.
        context_parts = (
            f"### 整体章程\n{style}\n\n",
            format_df_to_string(kb_df, "知识库"),
            format_df_to_string(long_outline_df, "故事总纲"),
            format_df_to_string(short_outline_df, "当前章节大纲"),
        )
        full_context = "".join(context_parts)

        instruction = prompt if prompt else '请基于当前内容,自然地延续剧情,写一个完整的段落。'

        system_prompt = (
            "你是一个富有创意的长篇小说家。请根据提供的背景设定和当前文本,自然地续写一段高质量的剧情。\n"
            "请直接输出续写内容,不要包含任何解释、前缀或后缀。"
        )
        # Only the tail of the editor content is sent, to bound prompt size.
        user_prompt = (
            f"### 背景设定与大纲\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
            f"### 用户指令\n{instruction}"
        )

        chunks = ModelHandler().generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T,
        )
        return "".join(chunks).strip()

    except Exception as e:
        print(f"[Agent] Error fetching paragraph continuation: {e}")
        return f"获取续写时出错: {e}"
smart_writer_kit/agent_for_prompt_suggestion.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
from model_handler import ModelHandler
from config import LING_MINI_2_0
from .agent_common_utils import format_df_to_string

# Fallback phrase used when the model returns fewer than 3 usable suggestions.
_DEFAULT_SUGGESTION = "继续推进剧情"

def _parse_suggestions(raw: str) -> list:
    """Parse the model reply ('a|b|c') into exactly three trimmed phrases.

    Blank fragments (e.g. from 'a||b' or a trailing '|') are dropped before
    padding, so callers never receive an empty suggestion string — the
    original split-then-index logic could return "" for a malformed reply.
    """
    phrases = [part.strip() for part in raw.split("|") if part.strip()]
    phrases += [_DEFAULT_SUGGESTION] * (3 - len(phrases))
    return phrases[:3]

def fetch_prompt_suggestions_agent(editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """
    Agent for fetching short prompt suggestions using LING_MINI_2_0.

    Returns:
        A 3-tuple of short continuation-prompt phrases. On any failure it
        returns three "生成失败" placeholders instead of raising.
    """
    print("\n[Agent][fetch_prompt_suggestions_agent] === 推理类型:续写提示推荐 ===")
    try:
        # 1. Format context. The long outline is intentionally unused here:
        #    short suggestions only need the charter, KB terms and the
        #    current chapter outline.
        style_context = f"### 整体章程\n{style}\n\n"
        kb_context = format_df_to_string(kb_df, "知识库")
        short_outline_context = format_df_to_string(short_outline_df, "当前章节大纲")

        # 2. Build System Prompt (strict `a|b|c` output format contract).
        system_prompt = (
            "你是一个辅助写作的创意助手。请根据提供的故事背景和知识库,结合“互动”、“冲突”、“发展”、“对话”等动作,生成3个简短的续写提示短语。\n"
            "要求:\n"
            "1. 短语简洁明了,例如:“和Alpha争吵”、“探索废弃的地铁站”、“回忆起旧照片的往事”。\n"
            "2. 尽量使用知识库中的专有名词。\n"
            "3. 请严格遵守以下格式:输出3个短语,用 `|` 分隔。不要包含其他内容。\n"
            "例如:和Alpha争吵|探索废弃的地铁站|回忆起旧照片的往事"
        )

        # 3. Build User Prompt
        full_context = style_context + kb_context + short_outline_context
        user_prompt = (
            f"### 背景设定\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-500:]}\n\n"  # Only need a little context
            f"### 任务\n生成3个续写提示。"
        )

        # 4. Call LLM
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_MINI_2_0
        )

        full_response = "".join(chunk for chunk in response_generator)
        print("【收到的建议】", full_response)

        # Robust parse: trims whitespace, drops empty fragments, pads to 3.
        s1, s2, s3 = _parse_suggestions(full_response)
        return s1, s2, s3

    except Exception as e:
        print(f"[Agent] Error fetching prompt suggestions: {e}")
        return "生成失败", "生成失败", "生成失败"
tab_chat.py CHANGED
@@ -15,14 +15,26 @@ def create_chat_tab():
15
 
16
  def get_history_df(history):
17
  if not history:
18
- return pd.DataFrame({'ID': [], 'Conversation': []})
19
  df = pd.DataFrame(history)
20
- return df[['id', 'title']].rename(columns={'id': 'ID', 'title': 'Conversation'})
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- def handle_new_chat(history):
23
  conv_id = str(uuid.uuid4())
24
  new_convo = {
25
- "id": conv_id, "title": "New Conversation",
26
  "messages": [], "timestamp": datetime.now().isoformat()
27
  }
28
  updated_history = [new_convo] + (history or [])
@@ -41,7 +53,8 @@ def create_chat_tab():
41
  if convo["id"] == selected_id:
42
  return selected_id, convo["messages"]
43
  # Fallback to new chat if something goes wrong
44
- return handle_new_chat(history)[0], handle_new_chat(history)[2]
 
45
 
46
  with gr.Row(equal_height=False, elem_id="indicator-chat-tab"):
47
  with gr.Column(scale=1):
@@ -79,12 +92,12 @@ def create_chat_tab():
79
 
80
  # --- Event Handlers --- #
81
  # The change handler is now encapsulated within create_model_selector
82
- def on_select_recommendation(evt: gr.SelectData, history):
83
  selected_task = evt.value[0]
84
  item = next((i for i in RECOMMENDED_INPUTS if i["task"] == selected_task), None)
85
  if not item: return gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
86
 
87
- new_id, new_history, new_messages, history_df_update = handle_new_chat(history)
88
 
89
  return (
90
  new_id, new_history,
@@ -96,7 +109,7 @@ def create_chat_tab():
96
  new_messages
97
  )
98
 
99
- recommended_dataset.select(on_select_recommendation, inputs=[conversation_store], outputs=[current_conversation_id, conversation_store, model_dropdown, system_prompt_textbox, temperature_slider, textbox, history_df, chatbot], show_progress="none")
100
 
101
  def chat_stream(conv_id, history, model_display_name, message, chat_history, system_prompt, temperature):
102
  if not message:
@@ -112,7 +125,7 @@ def create_chat_tab():
112
  if not current_convo:
113
  return history, gr.update()
114
 
115
- if len(final_chat_history) > len(current_convo["messages"]) and current_convo["title"] == "New Conversation":
116
  user_message = final_chat_history[-2]["content"] if len(final_chat_history) > 1 else final_chat_history[0]["content"]
117
  current_convo["title"] = user_message[:50]
118
 
@@ -141,7 +154,7 @@ def create_chat_tab():
141
  [conversation_store, history_df]
142
  )
143
 
144
- new_chat_btn.click(handle_new_chat, inputs=[conversation_store], outputs=[current_conversation_id, conversation_store, chatbot, history_df])
145
  history_df.select(load_conversation_from_df, inputs=[history_df, conversation_store], outputs=[current_conversation_id, chatbot])
146
 
147
  return conversation_store, current_conversation_id, history_df, chatbot
 
15
 
16
def get_history_df(history):
    """Build the sidebar history table: 'ID' and '对话' (title) per conversation."""
    if not history:
        # Keep the column schema stable even when there is nothing to show.
        return pd.DataFrame({'ID': [], '对话': []})
    renamed = pd.DataFrame(history).rename(columns={'id': 'ID', 'title': '对话'})
    return renamed[['ID', '对话']]
21
+
22
+ def handle_new_chat(history, current_conv_id=None):
23
+ # Try to find the current conversation
24
+ current_convo = next((c for c in history if c["id"] == current_conv_id), None) if history else None
25
+
26
+ # If current conversation exists and is empty, reuse it
27
+ if current_convo and not current_convo.get("messages", []):
28
+ return (
29
+ current_conv_id,
30
+ history,
31
+ [],
32
+ gr.update(value=get_history_df(history))
33
+ )
34
 
 
35
  conv_id = str(uuid.uuid4())
36
  new_convo = {
37
+ "id": conv_id, "title": "(新对话)",
38
  "messages": [], "timestamp": datetime.now().isoformat()
39
  }
40
  updated_history = [new_convo] + (history or [])
 
53
  if convo["id"] == selected_id:
54
  return selected_id, convo["messages"]
55
  # Fallback to new chat if something goes wrong
56
+ new_id, _, new_msgs, _ = handle_new_chat(history)
57
+ return new_id, new_msgs
58
 
59
  with gr.Row(equal_height=False, elem_id="indicator-chat-tab"):
60
  with gr.Column(scale=1):
 
92
 
93
  # --- Event Handlers --- #
94
  # The change handler is now encapsulated within create_model_selector
95
+ def on_select_recommendation(evt: gr.SelectData, history, current_conv_id):
96
  selected_task = evt.value[0]
97
  item = next((i for i in RECOMMENDED_INPUTS if i["task"] == selected_task), None)
98
  if not item: return gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
99
 
100
+ new_id, new_history, new_messages, history_df_update = handle_new_chat(history, current_conv_id)
101
 
102
  return (
103
  new_id, new_history,
 
109
  new_messages
110
  )
111
 
112
+ recommended_dataset.select(on_select_recommendation, inputs=[conversation_store, current_conversation_id], outputs=[current_conversation_id, conversation_store, model_dropdown, system_prompt_textbox, temperature_slider, textbox, history_df, chatbot], show_progress="none")
113
 
114
  def chat_stream(conv_id, history, model_display_name, message, chat_history, system_prompt, temperature):
115
  if not message:
 
125
  if not current_convo:
126
  return history, gr.update()
127
 
128
+ if len(final_chat_history) > len(current_convo["messages"]) and current_convo["title"] == "(新对话)":
129
  user_message = final_chat_history[-2]["content"] if len(final_chat_history) > 1 else final_chat_history[0]["content"]
130
  current_convo["title"] = user_message[:50]
131
 
 
154
  [conversation_store, history_df]
155
  )
156
 
157
+ new_chat_btn.click(handle_new_chat, inputs=[conversation_store, current_conversation_id], outputs=[current_conversation_id, conversation_store, chatbot, history_df])
158
  history_df.select(load_conversation_from_df, inputs=[history_df, conversation_store], outputs=[current_conversation_id, chatbot])
159
 
160
  return conversation_store, current_conversation_id, history_df, chatbot
tab_smart_writer.py CHANGED
@@ -1,9 +1,12 @@
1
  import gradio as gr
2
  import time
3
  from smart_writer_kit.agent_for_streaming_completion import fetch_flow_suggestion_agent, accept_flow_suggestion_agent
4
- from smart_writer_kit.agent_for_inspiration_expansion import fetch_inspiration_agent, apply_inspiration_agent, fetch_paragraph_continuation_agent
 
 
5
  from smart_writer_kit.agent_for_outline_update import update_outline_status_agent
6
  from smart_writer_kit.agent_for_kb_update import suggest_new_kb_terms_agent
 
7
 
8
  # --- Mock Data (for UI population only) ---
9
 
@@ -43,9 +46,8 @@ def get_stats(text):
43
  # --- UI Construction ---
44
 
45
  def create_smart_writer_tab():
46
- debounce_state = gr.State({"last_change": 0, "active": False, "style": "", "kb": [], "short_outline": [], "long_outline": []})
47
- debounce_timer = gr.Timer(0.1, active=True)
48
-
49
 
50
  with gr.Row(equal_height=False, elem_id="indicator-writing-tab"):
51
  # --- Left Column: Entity Console ---
@@ -109,24 +111,40 @@ def create_smart_writer_tab():
109
 
110
  # Area 1: Real-time Continuation (Flow)
111
  with gr.Column(scale=1, min_width=200):
112
- gr.Markdown("#### ⚡️ 实时续写")
113
- with gr.Row():
114
- btn_accept_flow = gr.Button("采纳续写 (Tab)", size="sm", variant="primary", elem_id='btn-action-accept-flow')
115
- btn_change_flow = gr.Button("换一个 (Shift+Tab)", size="sm", elem_id='btn-action-change-flow')
116
-
117
  flow_suggestion_display = gr.Textbox(
118
- show_label=False,
 
119
  placeholder="(等待输入或点击“换一个”...)",
120
  lines=3,
121
  interactive=False,
122
- elem_classes=["flow-suggestion-box"]
123
  )
124
- # Debounce Progress Indicator
125
- debounce_progress = gr.HTML(value="", visible=False)
 
 
 
 
 
126
 
127
  # Area 2: Paragraph Continuation (Inspiration)
128
  with gr.Column(scale=1, min_width=200):
129
- gr.Markdown("#### ✨ 整段续写")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  with gr.Row():
131
  btn_generate_para = gr.Button("整段续写 (Cmd+Enter)", size="sm", variant="primary", elem_id="btn-action-create-paragraph")
132
  btn_change_para = gr.Button("换一个", size="sm")
@@ -164,25 +182,26 @@ def create_smart_writer_tab():
164
  # 1. Stats
165
  editor.change(fn=get_stats, inputs=editor, outputs=stats_display)
166
 
167
- # 2. Flow Suggestion Logic
168
- def start_debounce(editor_content, style, kb, short_outline, long_outline):
169
- return {"last_change": time.time(), "active": True, "style": style, "kb": kb, "short_outline": short_outline, "long_outline": long_outline}, gr.update(active=True), gr.update(visible=True, value="<progress value='0' max='100'></progress> 补全中... 3.0s")
170
-
171
- def update_debounce(debounce_state, editor_content):
172
- if not debounce_state["active"]:
173
- return gr.update(), gr.update(), debounce_state, gr.update()
174
- elapsed = time.time() - debounce_state["last_change"]
175
- if elapsed >= 3:
176
- suggestion = fetch_flow_suggestion_agent(editor_content, debounce_state["style"], debounce_state["kb"], debounce_state["short_outline"], debounce_state["long_outline"])
177
- return gr.update(visible=False), suggestion, {"last_change": 0, "active": False, "style": "", "kb": [], "short_outline": [], "long_outline": []}, gr.update(active=False)
178
- else:
179
- progress = int((elapsed / 3) * 100)
180
- remaining = 3 - elapsed
181
- progress_html = f"<progress value='{progress}' max='100'></progress> 补全中... {remaining:.1f}s"
182
- return gr.update(value=progress_html), gr.update(), debounce_state, gr.update()
183
-
184
- editor.change(fn=start_debounce, inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input], outputs=[debounce_state, debounce_timer, debounce_progress])
185
- debounce_timer.tick(fn=update_debounce, inputs=[debounce_state, editor], outputs=[debounce_progress, flow_suggestion_display, debounce_state, debounce_timer])
 
186
 
187
  btn_change_flow.click(fn=fetch_flow_suggestion_agent, inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input], outputs=flow_suggestion_display)
188
 
@@ -191,18 +210,17 @@ def create_smart_writer_tab():
191
  btn_accept_flow.click(
192
  fn=lambda e, s: (accept_flow_suggestion_agent(e, s), ""), # Accept and clear suggestion
193
  inputs=accept_flow_fn_inputs,
194
- outputs=[editor, flow_suggestion_display],
195
- show_progress="hidden"
196
  )
197
 
198
- # 3. Paragraph Continuation Logic
199
- def generate_paragraph_wrapper(editor_val, style, kb, short, long_):
200
- return fetch_paragraph_continuation_agent(None, editor_val, style, kb, short, long_)
201
 
202
  for btn in [btn_generate_para, btn_change_para]:
203
  btn.click(
204
  fn=generate_paragraph_wrapper,
205
- inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input],
206
  outputs=[para_suggestion_display]
207
  )
208
 
@@ -217,6 +235,29 @@ def create_smart_writer_tab():
217
  inputs=[editor, para_suggestion_display],
218
  outputs=[editor, para_suggestion_display]
219
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
 
221
  # 4. Agent-based Context Updates
222
  btn_sync_outline.click(
@@ -228,4 +269,4 @@ def create_smart_writer_tab():
228
  fn=suggest_new_kb_terms_agent,
229
  inputs=[kb_input, editor],
230
  outputs=[suggested_kb_dataframe]
231
- )
 
1
  import gradio as gr
2
  import time
3
  from smart_writer_kit.agent_for_streaming_completion import fetch_flow_suggestion_agent, accept_flow_suggestion_agent
4
+ from smart_writer_kit.agent_for_inspiration_expansion import fetch_inspiration_agent, apply_inspiration_agent
5
+ from smart_writer_kit.agent_for_paragraph_continuation import fetch_paragraph_continuation_agent
6
+ from smart_writer_kit.agent_for_prompt_suggestion import fetch_prompt_suggestions_agent
7
  from smart_writer_kit.agent_for_outline_update import update_outline_status_agent
8
  from smart_writer_kit.agent_for_kb_update import suggest_new_kb_terms_agent
9
+ from ui_components.debounce_manager import DebounceManager
10
 
11
  # --- Mock Data (for UI population only) ---
12
 
 
46
  # --- UI Construction ---
47
 
48
  def create_smart_writer_tab():
49
+ # Initialize DebounceManager
50
+ debounce_manager = DebounceManager(debounce_time=2.0, tick_time=0.3, loading_text="稍后开始续写")
 
51
 
52
  with gr.Row(equal_height=False, elem_id="indicator-writing-tab"):
53
  # --- Left Column: Entity Console ---
 
111
 
112
  # Area 1: Real-time Continuation (Flow)
113
  with gr.Column(scale=1, min_width=200):
114
+
 
 
 
 
115
  flow_suggestion_display = gr.Textbox(
116
+ show_label=True,
117
+ label="实时续写建议",
118
  placeholder="(等待输入或点击“换一个”...)",
119
  lines=3,
120
  interactive=False,
121
+ elem_classes=["flow-suggestion-box"],
122
  )
123
+
124
+ btn_accept_flow = gr.Button("采纳续写 (Tab)", size="sm", variant="primary", elem_id='btn-action-accept-flow')
125
+ btn_change_flow = gr.Button("换一个 (Shift+Tab)", size="sm", elem_id='btn-action-change-flow')
126
+
127
+ # Debounce Progress Indicator (Using Manager)
128
+ debounce_state, debounce_timer, debounce_progress = debounce_manager.create_ui()
129
+ debounce_progress.visible = True
130
 
131
  # Area 2: Paragraph Continuation (Inspiration)
132
  with gr.Column(scale=1, min_width=200):
133
+ inspiration_prompt_input = gr.Textbox(
134
+ label="续写提示",
135
+ placeholder="例如:写一段关于...的描写",
136
+ lines=2
137
+ )
138
+
139
+ prompt_suggestions_dataset = gr.Dataset(
140
+ label="推荐提示 (点击填入)",
141
+ components=[gr.Textbox(visible=False)],
142
+ samples=[["生成建议..."], ["生成建议..."], ["生成建议..."]],
143
+ type="values"
144
+ )
145
+
146
+ refresh_suggestions_btn = gr.Button("🎲 换一批建议", size="sm", variant="secondary") # Combined trigger
147
+
148
  with gr.Row():
149
  btn_generate_para = gr.Button("整段续写 (Cmd+Enter)", size="sm", variant="primary", elem_id="btn-action-create-paragraph")
150
  btn_change_para = gr.Button("换一个", size="sm")
 
182
  # 1. Stats
183
  editor.change(fn=get_stats, inputs=editor, outputs=stats_display)
184
 
185
+ # 2. Flow Suggestion Logic (Using DebounceManager)
186
+
187
+ # Bind reset logic to editor change
188
+ editor.change(
189
+ fn=debounce_manager.reset,
190
+ inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input], # Capture all context as payload
191
+ outputs=[debounce_state, debounce_timer, debounce_progress]
192
+ )
193
+
194
+ # Bind tick logic
195
+ def flow_suggestion_trigger(editor_content, style, kb, short_outline, long_outline):
196
+ return fetch_flow_suggestion_agent(editor_content, style, kb, short_outline, long_outline)
197
+
198
+ # Note: debounce_manager.tick calls the trigger function.
199
+ # The lambda is used to pass the specific trigger function for this tab.
200
+ debounce_timer.tick(
201
+ fn=lambda s: debounce_manager.tick(s, flow_suggestion_trigger),
202
+ inputs=[debounce_state],
203
+ outputs=[debounce_progress, debounce_state, debounce_timer, flow_suggestion_display]
204
+ )
205
 
206
  btn_change_flow.click(fn=fetch_flow_suggestion_agent, inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input], outputs=flow_suggestion_display)
207
 
 
210
  btn_accept_flow.click(
211
  fn=lambda e, s: (accept_flow_suggestion_agent(e, s), ""), # Accept and clear suggestion
212
  inputs=accept_flow_fn_inputs,
213
+ outputs=[editor, flow_suggestion_display]
 
214
  )
215
 
216
+ # 3. Paragraph Continuation Logic (Updated with prompt input)
217
+ def generate_paragraph_wrapper(prompt_val, editor_val, style, kb, short, long_):
218
+ return fetch_paragraph_continuation_agent(prompt_val, editor_val, style, kb, short, long_)
219
 
220
  for btn in [btn_generate_para, btn_change_para]:
221
  btn.click(
222
  fn=generate_paragraph_wrapper,
223
+ inputs=[inspiration_prompt_input, editor, style_input, kb_input, short_outline_input, long_outline_input],
224
  outputs=[para_suggestion_display]
225
  )
226
 
 
235
  inputs=[editor, para_suggestion_display],
236
  outputs=[editor, para_suggestion_display]
237
  )
238
+
239
+ # Suggestions Logic
240
+ # Trigger for suggestion generation
241
+ def refresh_suggestions_wrapper(editor_content, style, kb, short_outline, long_outline):
242
+ s1, s2, s3 = fetch_prompt_suggestions_agent(editor_content, style, kb, short_outline, long_outline)
243
+ # Return a gr.update object to properly update the Dataset component
244
+ return gr.update(samples=[[s1], [s2], [s3]])
245
+
246
+ refresh_suggestions_btn.click(
247
+ fn=refresh_suggestions_wrapper,
248
+ inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input],
249
+ outputs=[prompt_suggestions_dataset]
250
+ )
251
+
252
+ # Dataset click -> fill prompt input
253
+ def fill_prompt_from_dataset(val):
254
+ return val[0]
255
+
256
+ prompt_suggestions_dataset.click(
257
+ fn=fill_prompt_from_dataset,
258
+ inputs=prompt_suggestions_dataset,
259
+ outputs=inspiration_prompt_input
260
+ )
261
 
262
  # 4. Agent-based Context Updates
263
  btn_sync_outline.click(
 
269
  fn=suggest_new_kb_terms_agent,
270
  inputs=[kb_input, editor],
271
  outputs=[suggested_kb_dataframe]
272
+ )
ui_components/debounce_manager.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import time
3
+
4
+ class DebounceManager:
5
+ def __init__(self, debounce_time: float, tick_time: float, loading_text: str):
6
+ """
7
+ Manages debounce logic and UI updates.
8
+
9
+ Args:
10
+ debounce_time (float): The time in seconds to wait before triggering the action.
11
+ tick_time (float): The interval in seconds for the timer tick.
12
+ loading_text (str): The text to display while waiting.
13
+ """
14
+ self.debounce_time = debounce_time
15
+ self.tick_time = tick_time
16
+ self.loading_text = loading_text
17
+
18
+ def _generate_progress_html(self, progress_percent: int, remaining_time: float = None) -> str:
19
+ """
20
+ Generates the HTML string for the progress bar.
21
+
22
+ Args:
23
+ progress_percent (int): Current progress percentage (0-100).
24
+ remaining_time (float, optional): Remaining time in seconds. If None, uses debounce_time.
25
+
26
+ Returns:
27
+ str: HTML string for the progress bar.
28
+ """
29
+ if remaining_time is None:
30
+ display_time = self.debounce_time
31
+ else:
32
+ display_time = remaining_time
33
+
34
+ return f"<div style='height: 20px; display: flex; align-items: center;'><progress value='{progress_percent}' max='100' style='width: 100px; margin-right: 10px;'></progress> <span>{self.loading_text} {display_time:.1f}s</span></div>"
35
+
36
+ def create_ui(self):
37
+ """
38
+ Creates the necessary UI components for the debounce mechanism.
39
+
40
+ Returns:
41
+ tuple: (debounce_state, debounce_timer, debounce_progress)
42
+ """
43
+ # State to store: last_change timestamp, active status, and payload (context)
44
+ debounce_state = gr.State({"last_change": 0, "active": False, "payload": None})
45
+ debounce_timer = gr.Timer(self.tick_time, active=False)
46
+ # Use the new method for initial value
47
+ initial_progress_html = self._generate_progress_html(0)
48
+ debounce_progress = gr.HTML(value=initial_progress_html, visible=True, elem_classes=["no-transition"])
49
+ return debounce_state, debounce_timer, debounce_progress
50
+
51
+ def reset(self, *args):
52
+ """
53
+ Resets the debounce timer. Call this when the monitored input changes.
54
+ Passes through any arguments as the 'payload' to be stored in state.
55
+ """
56
+ # Store all arguments as payload
57
+ payload = args if len(args) > 1 else (args[0] if args else None)
58
+
59
+ # Use the new method for progress HTML
60
+ progress_html = self._generate_progress_html(0) # Start at 0%
61
+
62
+ return {
63
+ "last_change": time.time(),
64
+ "active": True,
65
+ "payload": payload
66
+ }, gr.update(active=True), gr.update(visible=True, value=progress_html)
67
+
68
+ def tick(self, debounce_state, trigger_fn):
69
+ """
70
+ Called on every timer tick. checks if debounce time has passed.
71
+
72
+ Args:
73
+ debounce_state (dict): The current debounce state.
74
+ trigger_fn (callable): The function to execute when debounce completes.
75
+ It should accept the stored 'payload' as arguments.
76
+
77
+ Returns:
78
+ tuple: Updates for (debounce_progress, debounce_state, debounce_timer) + result of trigger_fn
79
+ """
80
+ # 1. If not active, do nothing
81
+ if not debounce_state["active"]:
82
+ # Return empty updates for UI components, and a dummy update for the trigger output
83
+ return gr.update(), debounce_state, gr.update(), gr.update()
84
+
85
+ elapsed = time.time() - debounce_state["last_change"]
86
+
87
+ # 2. Check if time is up
88
+ if elapsed >= self.debounce_time:
89
+ # Execute the trigger function with the stored payload
90
+ payload = debounce_state["payload"]
91
+ if isinstance(payload, tuple):
92
+ result = trigger_fn(*payload)
93
+ else:
94
+ result = trigger_fn(payload)
95
+
96
+ # Reset state to inactive
97
+ new_state = {"last_change": 0, "active": False, "payload": None}
98
+
99
+ # Return: Hide Progress, Update State, Stop Timer, Trigger Result
100
+ return gr.update(value=self._generate_progress_html(0)), new_state, gr.update(active=False), result
101
+
102
+ else:
103
+ # 3. Update Progress
104
+ progress_percent = int((elapsed / self.debounce_time) * 100)
105
+ remaining = self.debounce_time - elapsed
106
+
107
+ # Use the new method for progress HTML
108
+ progress_html = self._generate_progress_html(progress_percent, remaining)
109
+
110
+ # Return: Update Progress, Keep State, Keep Timer, Dummy Update for Result
111
+ return gr.update(value=progress_html, visible=True), debounce_state, gr.update(), gr.update()