GitHub Action committed on
Commit
74ebe5c
·
1 Parent(s): b931367

Sync ling-space changes from GitHub commit 86dd25a

Browse files
model_handler.py CHANGED
@@ -68,6 +68,16 @@ class OpenAICompatibleProvider(ModelProvider):
68
  # Initialize assistant's response in chat_history
69
  chat_history.append({"role": "assistant", "content": ""}) # Placeholder for assistant's streaming response
70
 
 
 
 
 
 
 
 
 
 
 
71
  try:
72
  with httpx.stream(
73
  "POST",
@@ -88,12 +98,14 @@ class OpenAICompatibleProvider(ModelProvider):
88
  delta = data["choices"][0].get("delta", {})
89
  content_chunk = delta.get("content")
90
  if content_chunk:
 
91
  chat_history[-1]["content"] += content_chunk
92
  yield chat_history
93
  except json.JSONDecodeError:
94
  print(f"Error decoding JSON chunk: {chunk}")
 
95
  except Exception as e:
96
- print(f"Error during API call: {e}")
97
  # Ensure the last message (assistant's placeholder) is updated with the error
98
  if chat_history and chat_history[-1]["role"] == "assistant":
99
  chat_history[-1]["content"] = f"An error occurred: {e}"
@@ -122,7 +134,15 @@ class OpenAICompatibleProvider(ModelProvider):
122
  "stream": True,
123
  "temperature": temperature,
124
  }
125
-
 
 
 
 
 
 
 
 
126
  try:
127
  with httpx.stream("POST", f"{self.api_base}/chat/completions", headers=headers, json=json_data, timeout=120) as response:
128
  response.raise_for_status()
@@ -137,9 +157,11 @@ class OpenAICompatibleProvider(ModelProvider):
137
  delta = data["choices"][0].get("delta", {})
138
  content_chunk = delta.get("content")
139
  if content_chunk:
 
140
  yield content_chunk
141
  except json.JSONDecodeError:
142
  print(f"Error decoding JSON chunk: {chunk}")
 
143
  except Exception as e:
144
  print(f"Error during API call: {e}")
145
  yield f"An error occurred: {e}"
@@ -188,7 +210,7 @@ class ModelHandler:
188
 
189
  yield from provider.get_response(model_id, message, chat_history, system_prompt, temperature)
190
 
191
- def generate_code(self, system_prompt, user_prompt, code_type, model_choice):
192
  """
193
  Generates code using the specified model.
194
  """
@@ -197,7 +219,6 @@ class ModelHandler:
197
  # Fallback if display name not found, maybe model_choice is the constant itself
198
  model_constant = model_choice if model_choice in CHAT_MODEL_SPECS else "LING_1T"
199
 
200
-
201
  model_spec = self.config.get(model_constant, {})
202
  provider_name = model_spec.get("provider")
203
  model_id = model_spec.get("model_id")
 
68
  # Initialize assistant's response in chat_history
69
  chat_history.append({"role": "assistant", "content": ""}) # Placeholder for assistant's streaming response
70
 
71
+ # 日志输出 - 在这里打印完整的请求数据(system, history, user, model_id)
72
+ print("\n>>> DEBUG: get_response")
73
+ print(">>> DEBUG: Sending request to OpenAI-compatible API")
74
+ print(">>> : System prompt:", repr(system_prompt))
75
+ print(">>> : Chat history:", repr(chat_history))
76
+ print(">>> : User message:", repr(message))
77
+ print(">>> : Model ID:", repr(model_id))
78
+ print(">>> : Temperature:", repr(temperature))
79
+
80
+ full_response = ""
81
  try:
82
  with httpx.stream(
83
  "POST",
 
98
  delta = data["choices"][0].get("delta", {})
99
  content_chunk = delta.get("content")
100
  if content_chunk:
101
+ full_response += content_chunk
102
  chat_history[-1]["content"] += content_chunk
103
  yield chat_history
104
  except json.JSONDecodeError:
105
  print(f"Error decoding JSON chunk: {chunk}")
106
+ print(f"DEBUG: Full code response: {full_response}")
107
  except Exception as e:
108
+ print(f"XXX DEBUG: Error during API call: {e}")
109
  # Ensure the last message (assistant's placeholder) is updated with the error
110
  if chat_history and chat_history[-1]["role"] == "assistant":
111
  chat_history[-1]["content"] = f"An error occurred: {e}"
 
134
  "stream": True,
135
  "temperature": temperature,
136
  }
137
+
138
+ print("\n>>> DEBUG: get_code_response")
139
+ print(">>> DEBUG: Sending request to OpenAI-compatible API")
140
+ print(">>> : System prompt:", repr(system_prompt))
141
+ print(">>> : User message:", repr(user_prompt))
142
+ print(">>> : Model ID:", repr(model_id))
143
+ print(">>> : Temperature:", repr(temperature))
144
+
145
+ full_response = ""
146
  try:
147
  with httpx.stream("POST", f"{self.api_base}/chat/completions", headers=headers, json=json_data, timeout=120) as response:
148
  response.raise_for_status()
 
157
  delta = data["choices"][0].get("delta", {})
158
  content_chunk = delta.get("content")
159
  if content_chunk:
160
+ full_response += content_chunk
161
  yield content_chunk
162
  except json.JSONDecodeError:
163
  print(f"Error decoding JSON chunk: {chunk}")
164
+ print(f"DEBUG: Full code response: {full_response}")
165
  except Exception as e:
166
  print(f"Error during API call: {e}")
167
  yield f"An error occurred: {e}"
 
210
 
211
  yield from provider.get_response(model_id, message, chat_history, system_prompt, temperature)
212
 
213
+ def generate_code(self, system_prompt, user_prompt, model_choice):
214
  """
215
  Generates code using the specified model.
216
  """
 
219
  # Fallback if display name not found, maybe model_choice is the constant itself
220
  model_constant = model_choice if model_choice in CHAT_MODEL_SPECS else "LING_1T"
221
 
 
222
  model_spec = self.config.get(model_constant, {})
223
  provider_name = model_spec.get("provider")
224
  model_id = model_spec.get("model_id")
smart_writer_kit/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # This file makes the directory a Python package
smart_writer_kit/agent_for_inspiration_expansion.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ from model_handler import ModelHandler
4
+ from config import LING_1T
5
+
6
+ def _format_df_to_string(df: pd.DataFrame, title: str) -> str:
7
+ """Formats a pandas DataFrame into a markdown-like string for the prompt."""
8
+ if df is None or df.empty:
9
+ return ""
10
+
11
+ header = f"### {title}\n"
12
+ rows = []
13
+ for _, row in df.iterrows():
14
+ if 'Done' in df.columns and 'Task' in df.columns:
15
+ status = "[x]" if row['Done'] else "[ ]"
16
+ rows.append(f"- {status} {row['Task']}")
17
+ elif 'Term' in df.columns and 'Description' in df.columns:
18
+ rows.append(f"- **{row['Term']}**: {row['Description']}")
19
+
20
+ return header + "\n".join(rows) + "\n\n"
21
+
22
def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """Fetch three candidate plot continuations from the LLM.

    Builds one prompt out of the style charter, knowledge base and both
    outlines, asks the model for three options separated by the literal
    marker ``[END_OF_CHOICE]``, and returns them for the Gradio modal.

    Args:
        prompt: Optional user instruction; a default "continue naturally"
            instruction is substituted when empty.
        editor_content: Manuscript text; only the last 2000 chars are sent.
        style: Free-text style charter.
        kb_df: Knowledge base table (Term/Description columns).
        short_outline_df: Current-chapter outline (Done/Task columns).
        long_outline_df: Whole-story outline (Done/Task columns).

    Returns:
        ``(modal_update, option1, option2, option3)``; on failure the
        options carry an error message and the modal is still shown.
    """
    print("\n[Agent][fetch_inspiration_agent] === 推理类型:灵感扩写 ===")
    print("【发出的完整上下文】")
    print("prompt:", repr(prompt))
    print("editor_content:", repr(editor_content))
    print("style:", repr(style))
    print("kb_df:", repr(kb_df.to_dict("records")))
    print("short_outline_df:", repr(short_outline_df.to_dict("records")))
    print("long_outline_df:", repr(long_outline_df.to_dict("records")))

    try:
        # 1. Format context from the UI inputs.
        style_context = f"### 整体章程\n{style}\n\n"
        kb_context = _format_df_to_string(kb_df, "知识库")
        short_outline_context = _format_df_to_string(short_outline_df, "当前章节大纲")
        long_outline_context = _format_df_to_string(long_outline_df, "故事总纲")

        # 2. System prompt: pins the three-option / [END_OF_CHOICE] format
        # that the split() below depends on.
        system_prompt = (
            "你是一个富有创意的长篇小说家,你的任务是根据提供的背景设定和当前文本,创作三个不同的、有创意的剧情发展方向。\n"
            "请严格遵守以下格式:直接开始写第一个选项,然后用 `[END_OF_CHOICE]` 作为分隔符,接着写第二个选项,再用 `[END_OF_CHOICE]` 分隔,最后写第三个选项。不要有任何额外的解释或编号。\n"
            "例如:\n"
            "剧情发展一的内容...[END_OF_CHOICE]剧情发展二的内容...[END_OF_CHOICE]剧情发展三的内容..."
        )

        # 3. User prompt. Guard against a None editor value — Gradio hands
        # over None for a textbox that was never edited, and slicing None
        # would otherwise raise and turn into a generic error message.
        full_context = style_context + kb_context + long_outline_context + short_outline_context
        user_prompt = (
            f"### 背景设定与大纲\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{(editor_content or '')[-2000:]}\n\n"
            f"### 用户指令\n{prompt if prompt else '请基于当前内容,自然地延续剧情。'}"
        )

        # 4. Call the LLM and drain the streamed chunks.
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T
        )

        full_response = "".join(chunk for chunk in response_generator)

        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))

        # 5. Split into options. Log the count the model actually produced
        # BEFORE padding — the old code logged the padded count, which was
        # always >= 3 and therefore useless for debugging.
        choices = full_response.split("[END_OF_CHOICE]")
        print(f"[Agent] LLM Choices Received: {len(choices)}")

        # Pad with placeholders so the three-way indexing below never fails.
        choices += ["(模型未生成足够选项)"] * (3 - len(choices))

        return gr.update(visible=True), choices[0].strip(), choices[1].strip(), choices[2].strip()

    except Exception as e:
        print(f"[Agent] Error fetching inspiration: {e}")
        error_message = f"获取灵感时出错: {e}"
        return gr.update(visible=True), error_message, "请检查日志", "请检查日志"
84
+
85
+
86
def apply_inspiration_agent(current_text: str, inspiration_text: str):
    """Append the chosen inspiration to the editor text.

    Returns a tuple for the Gradio handler: the merged editor text, an
    update hiding the inspiration modal, and an empty string that clears
    the prompt box.
    """
    print("\n[Agent][apply_inspiration_agent] === 推理类型:应用灵感 ===")
    print("【发出的完整上下文】")
    print("current_text:", repr(current_text))
    print("inspiration_text:", repr(inspiration_text))
    # An empty editor takes the inspiration verbatim; otherwise separate
    # the new paragraph from the existing text with a blank line.
    if current_text:
        new_text = current_text + "\n\n" + inspiration_text
    else:
        new_text = inspiration_text
    print("【收到的完整上下文】")
    print("new_text:", repr(new_text))
    # Unpacks into (editor, inspiration_modal, inspiration_prompt_input).
    return new_text, gr.update(visible=False), ""
smart_writer_kit/agent_for_kb_update.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ import json
4
+ from model_handler import ModelHandler
5
+ from config import LING_1T
6
+
7
+ def _format_kb_for_prompt(df: pd.DataFrame) -> str:
8
+ """Formats the knowledge base DataFrame into a simple list for the prompt."""
9
+ if df is None or df.empty:
10
+ return "无。"
11
+ terms = [f"- {row['Term']}" for _, row in df.iterrows()]
12
+ return "\n".join(terms)
13
+
14
def suggest_new_kb_terms_agent(kb_df: pd.DataFrame, editor_content: str):
    """Ask the LLM to extract new knowledge-base terms from the manuscript.

    Sends the existing term list plus the last 4000 characters of the
    editor text and expects a JSON array of objects with ``Term`` and
    ``Description`` keys (at most five, per the system prompt).

    Args:
        kb_df: Existing knowledge base (Term/Description columns).
        editor_content: Current manuscript text.

    Returns:
        A pair of Gradio updates ``(suggestions_dataframe, header)``:
        filled and visible on success, hidden on short input or failure.
    """

    # Fewer than ~50 chars is unlikely to contain extractable entities;
    # skip the expensive LLM round-trip entirely.
    if editor_content is None or len(editor_content.strip()) < 50:
        print("[Agent] Editor content too short, skipping KB suggestion.")
        # Return empty data and keep components hidden
        return gr.update(value=[], visible=False), gr.update(visible=False)

    try:
        # 1. Prepare prompts: the system prompt pins the strict JSON-array
        # output contract that the parser below depends on.
        system_prompt = (
            "你是一个实体提取机器人。你的任务是从给定文本中识别出新的、重要的、值得记录的专有名词(如人名、地名、组织、物品)或核心概念,并为它们提供一句简洁的描述。\n"
            "你的回答必须是一个遵循以下规则的 JSON 数组:\n"
            "1. 数组中的每个元素都是一个对象。\n"
            "2. 每个对象必须包含两个键:`Term` (词条名) 和 `Description` (描述)。\n"
            "3. 不要提取已经存在于'现有知识库'中的词条。\n"
            "4. 最多返回 5 个最重要的词条。\n"
            "5. 不要返回除了这个 JSON 数组之外的任何其他文本、解释或代码块标记。"
        )

        kb_str = _format_kb_for_prompt(kb_df)
        user_prompt = (
            f"### 现有知识库\n{kb_str}\n\n"
            f"### 当前文本\n{editor_content[-4000:]}\n\n"
            "### 指令\n请根据'当前文本',分析并提取出新的知识库词条,并返回 JSON 数组。"
        )

        # 2. Call the LLM and drain the streamed chunks into one string.
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T
        )
        full_response = "".join(chunk for chunk in response_generator)

        # 3. Parse JSON and format for the Gradio Dataframe.
        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))

        # Strip a ```json ... ``` fence if the model added one.
        # NOTE(review): the [7:-3] slice assumes the closing ``` fence is
        # present; a fenced response missing it would lose its last three
        # characters — confirm against real model output.
        if full_response.strip().startswith("```json"):
            full_response = full_response.strip()[7:-3].strip()

        suggested_terms = json.loads(full_response)

        # Convert list of dicts to list of lists for Gradio Dataframe
        df_data = [[item.get("Term", ""), item.get("Description", "")] for item in suggested_terms]

        print("【收到的完整上下文】")
        print("suggested_terms:", repr(suggested_terms))

        # Make components visible and return data
        return gr.update(value=df_data, visible=True), gr.update(visible=True)

    except json.JSONDecodeError:
        # full_response is always bound here: json.loads is the only
        # raiser of JSONDecodeError and runs after the assignment.
        print(f"[Agent] Error: Failed to decode JSON from LLM response for KB: {full_response}")
        return gr.update(visible=False), gr.update(visible=False)
    except Exception as e:
        print(f"[Agent] Error suggesting new KB terms: {e}")
        return gr.update(visible=False), gr.update(visible=False)
smart_writer_kit/agent_for_outline_update.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ import json
4
+ from model_handler import ModelHandler
5
+ from config import LING_FLASH_2_0
6
+
7
+ def _format_outline_for_prompt(df: pd.DataFrame) -> str:
8
+ """Formats the outline DataFrame into a simple numbered list for the prompt."""
9
+ if df is None or df.empty:
10
+ return "无任务。"
11
+
12
+ tasks = [f"{i+1}. {row['Task']}" for i, row in df.iterrows()]
13
+ return "\n".join(tasks)
14
+
15
def update_outline_status_agent(short_outline_df: pd.DataFrame, editor_content: str):
    """Let the LLM mark outline tasks as done based on the manuscript.

    Sends the numbered task list and the last 4000 characters of the
    editor text, expects a JSON object mapping each task's original text
    to a boolean, and flips the ``Done`` column accordingly.

    Args:
        short_outline_df: Current-chapter outline (Done/Task columns).
        editor_content: Current manuscript text.

    Returns:
        A new DataFrame with updated ``Done`` flags, or the input
        DataFrame unchanged when the text is too short or any step fails.
    """

    # Too little text to judge completion against — skip the LLM call.
    if editor_content is None or len(editor_content.strip()) < 20:
        return short_outline_df # Return original df

    try:
        # 1. Prepare prompts: the system prompt pins the JSON-object
        # contract (task text -> bool) the update loop below relies on.
        system_prompt = (
            "你是一个任务分析机器人。请仔细阅读用户提供的'已完成大纲'和'当前文本',判断大纲中的每项任务是否已经在文本中被完成。\n"
            "你的回答必须是一个遵循以下规则的 JSON 对象:\n"
            "1. JSON 的 key 是大纲中的任务原文。\n"
            "2. JSON 的 value 是一个布尔值 (`true` 或 `false`),`true` 代表任务已完成,`false` 代表未完成。\n"
            "3. 不要返回除了这个 JSON 对象之外的任何其他文本、解释或代码块标记。"
        )

        outline_str = _format_outline_for_prompt(short_outline_df)
        user_prompt = (
            f"### 已有大纲\n{outline_str}\n\n"
            f"### 当前文本\n{editor_content[-4000:]}\n\n"
            "### 指令\n请根据上述'当前文本',分析'已有大纲'中的任务完成情况,并返回 JSON 对象。"
        )

        # 2. Call the LLM and drain the streamed chunks into one string.
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_FLASH_2_0
        )
        full_response = "".join(chunk for chunk in response_generator)

        # 3. Parse JSON and update the DataFrame.
        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))

        # Clean up potential markdown code block.
        # NOTE(review): the [7:-3] slice assumes the closing ``` fence is
        # present; confirm against real model output.
        if full_response.strip().startswith("```json"):
            full_response = full_response.strip()[7:-3].strip()

        completion_status = json.loads(full_response)

        # Create a copy to avoid modifying the original df in place
        updated_df = short_outline_df.copy()

        # Match tasks by their exact text; tasks the model omitted (or
        # paraphrased) keep their current Done value.
        for i, row in updated_df.iterrows():
            task_text = row['Task']
            if task_text in completion_status:
                updated_df.at[i, 'Done'] = bool(completion_status[task_text])

        print("【收到的完整上下文】")
        print("updated_df:\n", updated_df.to_string())
        return updated_df

    except json.JSONDecodeError:
        # full_response is always bound here: json.loads is the only
        # raiser of JSONDecodeError and runs after the assignment.
        print(f"[Agent] Error: Failed to decode JSON from LLM response: {full_response}")
        # On JSON error, we don't want to change anything.
        return short_outline_df
    except Exception as e:
        print(f"[Agent] Error updating outline status: {e}")
        # On other errors, also return the original dataframe.
        return short_outline_df
smart_writer_kit/agent_for_streaming_completion.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from model_handler import ModelHandler
2
+ from config import LING_FLASH_2_0
3
+
4
def fetch_flow_suggestion_agent(editor_content: str):
    """Fetch a short, real-time continuation of the editor text from the LLM.

    Args:
        editor_content: Current manuscript text; only the last 1000
            characters are sent to keep the request small.

    Returns:
        The stripped continuation string, or a Chinese placeholder /
        error string when the input is too short or the call fails.
    """

    # Require a few characters of context before spending an LLM call.
    if not editor_content or len(editor_content.strip()) < 5:
        return "(请输入更多内容以获取建议...)"

    try:
        model_handler = ModelHandler()

        # For a simple continuation, a concise system prompt is enough.
        system_prompt = "你是一个写作助手,请根据用户输入的内容,紧接着写一句简短、流畅的续写。不要重复用户已输入的内容,直接开始写你续写的部分即可。**尤其关注用户输入的最后几个字**。"

        # Only the tail of the text is used as the user prompt.
        user_prompt = editor_content[-1000:]

        # generate_code streams plain content chunks (no chat history),
        # which is exactly what this one-shot continuation needs.
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_FLASH_2_0
        )

        # Assemble the streamed response into one string.
        full_response = "".join(chunk for chunk in response_generator)
        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))
        return full_response.strip()

    except Exception as e:
        print(f"[Agent] Error fetching flow suggestion: {e}")
        return f"(获取建议时出错: {e})"
43
+
44
+
45
def accept_flow_suggestion_agent(current_text: str, suggestion: str):
    """Merge an accepted flow suggestion into the editor text.

    Placeholder suggestions — empty, "waiting for input" (等待输入), or
    error text (出错) — are ignored and the editor text is returned
    unchanged.
    """
    is_placeholder = (not suggestion) or ("等待输入" in suggestion) or ("出错" in suggestion)
    result = current_text if is_placeholder else current_text + suggestion
    print("【收到的完整上下文】")
    print("result:", repr(result))
    return result
tab_code.py CHANGED
@@ -78,7 +78,7 @@ def generate_code(code_type, model_choice, user_prompt, chatbot_history):
78
  buffer = ""
79
  is_thinking = False
80
 
81
- for code_chunk in model_handler.generate_code(system_prompt, user_prompt, code_type, model_choice):
82
  full_code_with_think += code_chunk
83
  buffer += code_chunk
84
 
 
78
  buffer = ""
79
  is_thinking = False
80
 
81
+ for code_chunk in model_handler.generate_code(system_prompt, user_prompt, model_choice):
82
  full_code_with_think += code_chunk
83
  buffer += code_chunk
84
 
tab_smart_writer.py CHANGED
@@ -1,8 +1,11 @@
1
  import gradio as gr
2
  import time
3
- import random
 
 
 
4
 
5
- # --- Mock Data ---
6
 
7
  MOCK_STYLE = """风格:赛博朋克 / 黑色电影
8
  视角:第三人称限制视角(主角:凯)
@@ -33,97 +36,23 @@ MOCK_LONG_TERM_OUTLINE = [
33
  [False, "与荒坂公司的最终决战。"]
34
  ]
35
 
36
- MOCK_INSPIRATIONS = [
37
- "霓虹灯光在雨后的路面上破碎成无数光斑,凯拉紧了风衣的领口,义体手臂在寒风中隐隐作痛。来生酒吧的招牌在雾气中若隐若现,像是一只在黑暗中窥视的电子眼。",
38
- "\"你来晚了。\"接头人的声音经过变声器处理,听起来像是指甲划过玻璃。他坐在阴影里,只有指尖的一点红光在闪烁——那是他正在抽的廉价合成烟。",
39
- "突如其来的爆炸声震碎了酒吧的玻璃,人群尖叫着四散奔逃。凯本能地拔出了腰间的动能手枪,他的视觉系统瞬间切换到了战斗模式,周围的一切都变成了数据流。"
40
- ]
41
-
42
- MOCK_FLOW_SUGGESTIONS = [
43
- "他感觉到了...",
44
- "空气中弥漫着...",
45
- "那是他从未见过的...",
46
- "就在这一瞬间..."
47
- ]
48
-
49
- # --- Logic Functions ---
50
-
51
  def get_stats(text):
52
- """Mock word count and read time."""
53
  if not text:
54
  return "0 Words | 0 mins"
55
- words = len(text)
56
- read_time = max(1, words // 500)
57
  return f"{words} Words | ~{read_time} mins"
58
 
59
- def fetch_inspiration(prompt):
60
- """Simulate fetching inspiration options based on user prompt."""
61
- time.sleep(1)
62
-
63
- # Simple Mock Logic based on prompt keywords
64
- if prompt and "打斗" in prompt:
65
- opts = [
66
- "凯侧身闪过那一记重拳,义体关节发出尖锐的摩擦声。他顺势抓住对方的手腕,电流顺着接触点瞬间爆发。",
67
- "激光刃切开空气,留下一道灼热的残影。凯没有退缩,他的视觉系统已经计算出了对方唯一的破绽。",
68
- "周围的空气仿佛凝固了,只剩下心跳声和能量枪充能的嗡嗡声。谁先动,谁就会死。"
69
- ]
70
- elif prompt and "风景" in prompt:
71
- opts = [
72
- "酸雨冲刷着生锈的金属外墙,流下一道道黑色的泪痕。远处的全息广告牌在雨雾中显得格外刺眼。",
73
- "清晨的阳光穿透厚重的雾霾,无力地洒在贫民窟的屋顶上。这里没有希望,只有生存。",
74
- "夜之城的地下就像是一个巨大的迷宫,管道交错,蒸汽弥漫,老鼠和瘾君子在阴影中通过眼神交流。"
75
- ]
76
- else:
77
- opts = MOCK_INSPIRATIONS
78
-
79
- return gr.update(visible=True), opts[0], opts[1], opts[2]
80
-
81
- def apply_inspiration(current_text, inspiration_text):
82
- """Append selected inspiration to the editor."""
83
- if not current_text:
84
- new_text = inspiration_text
85
- else:
86
- new_text = current_text + "\n\n" + inspiration_text
87
- return new_text, gr.update(visible=False), "" # Clear prompt
88
-
89
  def dismiss_inspiration():
90
  return gr.update(visible=False)
91
 
92
- def fetch_flow_suggestion(current_text):
93
- """Simulate fetching a short continuation."""
94
- # If text ends with newline, maybe don't suggest? Or suggest new paragraph start.
95
- time.sleep(0.5)
96
- return random.choice(MOCK_FLOW_SUGGESTIONS)
97
-
98
- def accept_flow_suggestion(current_text, suggestion):
99
- if not suggestion or "等待输入" in suggestion:
100
- return current_text
101
- return current_text + suggestion
102
-
103
- def refresh_context(current_outline):
104
- """Mock refreshing the outline context (auto-complete task or add new one)."""
105
- new_outline = [row[:] for row in current_outline]
106
-
107
- # Try to complete the first pending task
108
- task_completed = False
109
- for row in new_outline:
110
- if not row[0]:
111
- row[0] = True
112
- task_completed = True
113
- break
114
-
115
- # If all done, or randomly, add a new event
116
- if not task_completed or random.random() > 0.7:
117
- new_outline.append([False, f"新的动态事件: 突发情况 #{random.randint(100, 999)}"])
118
-
119
- return new_outline
120
-
121
  # --- UI Construction ---
122
 
123
  def create_smart_writer_tab():
124
- # Hidden Buttons for JS triggers
125
- btn_accept_flow_trigger = gr.Button(visible=False, elem_id="btn_accept_flow_trigger")
126
- btn_refresh_context_trigger = gr.Button(visible=False, elem_id="btn_refresh_context_trigger")
127
 
128
  with gr.Row(equal_height=False, elem_id="indicator-writing-tab"):
129
  # --- Left Column: Entity Console ---
@@ -147,6 +76,18 @@ def create_smart_writer_tab():
147
  label="知识库",
148
  wrap=True
149
  )
 
 
 
 
 
 
 
 
 
 
 
 
150
 
151
  with gr.Accordion("当前章节大纲 (Short-Term)", open=True):
152
  short_outline_input = gr.Dataframe(
@@ -157,6 +98,8 @@ def create_smart_writer_tab():
157
  label="当前章节大纲",
158
  col_count=(2, "fixed"),
159
  )
 
 
160
 
161
  with gr.Accordion("故事总纲 (Long-Term)", open=False):
162
  long_outline_input = gr.Dataframe(
@@ -173,7 +116,7 @@ def create_smart_writer_tab():
173
  # Toolbar
174
  with gr.Row(elem_classes=["toolbar"]):
175
  stats_display = gr.Markdown("0 Words | 0 mins")
176
- inspiration_btn = gr.Button("✨ 灵感扩写 (Cmd+Enter)", size="sm", variant="primary")
177
 
178
  # 主要编辑器区域
179
  editor = gr.Textbox(
@@ -188,14 +131,17 @@ def create_smart_writer_tab():
188
  # Flow Suggestion
189
  with gr.Row(variant="panel"):
190
  flow_suggestion_display = gr.Textbox(
191
- label="AI 实时续写建议 (按 Tab 采纳)",
192
- value="(等待输入...)",
193
  interactive=False,
194
  scale=4,
195
  elem_classes=["flow-suggestion-box"]
196
  )
197
- accept_flow_btn = gr.Button("采纳", scale=1, elem_id='btn-action-accept-flow')
198
- refresh_flow_btn = gr.Button("换一个", scale=1)
 
 
 
199
 
200
  # Inspiration Modal
201
  with gr.Group(visible=False) as inspiration_modal:
@@ -206,12 +152,12 @@ def create_smart_writer_tab():
206
  placeholder="例如:写一段激烈的打斗 / 描写赛博朋克夜景...",
207
  lines=1
208
  )
209
- refresh_inspiration_btn = gr.Button("生成选项")
210
 
211
  with gr.Row():
212
- opt1_btn = gr.Button(MOCK_INSPIRATIONS[0], elem_classes=["inspiration-card"])
213
- opt2_btn = gr.Button(MOCK_INSPIRATIONS[1], elem_classes=["inspiration-card"])
214
- opt3_btn = gr.Button(MOCK_INSPIRATIONS[2], elem_classes=["inspiration-card"])
215
  cancel_insp_btn = gr.Button("取消")
216
 
217
  # --- Interactions ---
@@ -220,43 +166,61 @@ def create_smart_writer_tab():
220
  editor.change(fn=get_stats, inputs=editor, outputs=stats_display)
221
 
222
  # 2. Inspiration Workflow
223
- # Open Modal (reset prompt)
224
- inspiration_btn.click(
225
- fn=lambda: (gr.update(visible=True), ""),
226
- outputs=[inspiration_modal, inspiration_prompt_input]
227
- )
228
 
229
  # Generate Options based on Prompt
230
  refresh_inspiration_btn.click(
231
- fn=fetch_inspiration,
232
- inputs=[inspiration_prompt_input],
233
  outputs=[inspiration_modal, opt1_btn, opt2_btn, opt3_btn]
234
  )
235
 
236
  # Apply Option
237
  for btn in [opt1_btn, opt2_btn, opt3_btn]:
238
  btn.click(
239
- fn=apply_inspiration,
240
  inputs=[editor, btn],
241
  outputs=[editor, inspiration_modal, inspiration_prompt_input]
242
  )
243
 
244
- cancel_insp_btn.click(fn=dismiss_inspiration, outputs=inspiration_modal)
245
-
246
- # 3. Flow Suggestion
247
- editor.change(fn=fetch_flow_suggestion, inputs=editor, outputs=flow_suggestion_display)
248
- refresh_flow_btn.click(fn=fetch_flow_suggestion, inputs=editor, outputs=flow_suggestion_display)
249
-
250
- # Accept Flow (Triggered by Button or Tab Key via JS)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  accept_flow_fn_inputs = [editor, flow_suggestion_display]
252
  accept_flow_fn_outputs = [editor]
253
-
254
- accept_flow_btn.click(fn=accept_flow_suggestion, inputs=accept_flow_fn_inputs, outputs=accept_flow_fn_outputs)
255
- btn_accept_flow_trigger.click(fn=accept_flow_suggestion, inputs=accept_flow_fn_inputs, outputs=accept_flow_fn_outputs)
256
 
257
- # 4. Context Refresh (Triggered by Enter Key via JS)
258
- btn_refresh_context_trigger.click(
259
- fn=refresh_context,
260
- inputs=[short_outline_input],
261
  outputs=[short_outline_input]
262
  )
 
 
 
 
 
 
1
  import gradio as gr
2
  import time
3
+ from smart_writer_kit.agent_for_streaming_completion import fetch_flow_suggestion_agent, accept_flow_suggestion_agent
4
+ from smart_writer_kit.agent_for_inspiration_expansion import fetch_inspiration_agent, apply_inspiration_agent
5
+ from smart_writer_kit.agent_for_outline_update import update_outline_status_agent
6
+ from smart_writer_kit.agent_for_kb_update import suggest_new_kb_terms_agent
7
 
8
+ # --- Mock Data (for UI population only) ---
9
 
10
  MOCK_STYLE = """风格:赛博朋克 / 黑色电影
11
  视角:第三人称限制视角(主角:凯)
 
36
  [False, "与荒坂公司的最终决战。"]
37
  ]
38
 
39
+ # --- UI Helper Functions ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
def get_stats(text):
    """Return a word-count / reading-time summary for the toolbar.

    Reading time assumes roughly 200 words per minute, floored at one
    minute for any non-empty text.
    """
    if not text:
        return "0 Words | 0 mins"
    word_count = len(text.split())
    minutes = max(1, word_count // 200)
    return f"{word_count} Words | ~{minutes} mins"
47
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
def dismiss_inspiration():
    """Hide the inspiration modal without altering the editor content."""
    return gr.update(visible=False)
50
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  # --- UI Construction ---
52
 
53
  def create_smart_writer_tab():
54
+ debounce_state = gr.State({"last_change": 0, "active": False})
55
+ debounce_timer = gr.Timer(0.5, active=False)
 
56
 
57
  with gr.Row(equal_height=False, elem_id="indicator-writing-tab"):
58
  # --- Left Column: Entity Console ---
 
76
  label="知识库",
77
  wrap=True
78
  )
79
+ with gr.Row():
80
+ btn_suggest_kb = gr.Button("🔍 提取新词条", size="sm")
81
+
82
+ md_suggested_terms_header = gr.Markdown("#### 推荐词条", visible=False) # Placeholder for suggested terms
83
+ suggested_kb_dataframe = gr.Dataframe(
84
+ headers=["Term", "Description"],
85
+ datatype=["str", "str"],
86
+ visible=False, # Initially hidden
87
+ interactive=False,
88
+ label="推荐词条"
89
+ )
90
+
91
 
92
  with gr.Accordion("当前章节大纲 (Short-Term)", open=True):
93
  short_outline_input = gr.Dataframe(
 
98
  label="当前章节大纲",
99
  col_count=(2, "fixed"),
100
  )
101
+ with gr.Row():
102
+ btn_sync_outline = gr.Button("🔄 同步状态", size="sm")
103
 
104
  with gr.Accordion("故事总纲 (Long-Term)", open=False):
105
  long_outline_input = gr.Dataframe(
 
116
  # Toolbar
117
  with gr.Row(elem_classes=["toolbar"]):
118
  stats_display = gr.Markdown("0 Words | 0 mins")
119
+ inspiration_btn = gr.Button("✨ 继续整段 (Cmd/Ctrl+Enter)", size="sm", variant="primary", elem_id="btn-action-create-paragraph")
120
 
121
  # 主要编辑器区域
122
  editor = gr.Textbox(
 
131
  # Flow Suggestion
132
  with gr.Row(variant="panel"):
133
  flow_suggestion_display = gr.Textbox(
134
+ label="AI 实时续写建议 (按 Tab 采纳)",
135
+ value="(等待输入...)",
136
  interactive=False,
137
  scale=4,
138
  elem_classes=["flow-suggestion-box"]
139
  )
140
+ accept_flow_btn = gr.Button("采纳(Tab)", scale=1, elem_id='btn-action-accept-flow')
141
+ refresh_flow_btn = gr.Button("换一个(Shift+Tab)", scale=1, elem_id='btn-action-change-flow')
142
+
143
+ # Debounce Progress
144
+ debounce_progress = gr.HTML(value="", visible=False)
145
 
146
  # Inspiration Modal
147
  with gr.Group(visible=False) as inspiration_modal:
 
152
  placeholder="例如:写一段激烈的打斗 / 描写赛博朋克夜景...",
153
  lines=1
154
  )
155
+ refresh_inspiration_btn = gr.Button("生成选项(Shift+Enter)")
156
 
157
  with gr.Row():
158
+ opt1_btn = gr.Button("...", elem_classes=["inspiration-card"])
159
+ opt2_btn = gr.Button("...", elem_classes=["inspiration-card"])
160
+ opt3_btn = gr.Button("...", elem_classes=["inspiration-card"])
161
  cancel_insp_btn = gr.Button("取消")
162
 
163
  # --- Interactions ---
 
166
  editor.change(fn=get_stats, inputs=editor, outputs=stats_display)
167
 
168
  # 2. Inspiration Workflow
169
+ # Open Modal (triggered by visible button or hidden trigger button for Cmd+Enter)
170
+ open_inspiration_modal_fn = lambda: (gr.update(visible=True), "")
171
+ inspiration_btn.click(fn=open_inspiration_modal_fn, outputs=[inspiration_modal, inspiration_prompt_input])
 
 
172
 
173
  # Generate Options based on Prompt
174
  refresh_inspiration_btn.click(
175
+ fn=fetch_inspiration_agent,
176
+ inputs=[inspiration_prompt_input, editor, style_input, kb_input, short_outline_input, long_outline_input],
177
  outputs=[inspiration_modal, opt1_btn, opt2_btn, opt3_btn]
178
  )
179
 
180
  # Apply Option
181
  for btn in [opt1_btn, opt2_btn, opt3_btn]:
182
  btn.click(
183
+ fn=apply_inspiration_agent,
184
  inputs=[editor, btn],
185
  outputs=[editor, inspiration_modal, inspiration_prompt_input]
186
  )
187
 
188
+ cancel_insp_btn.click(fn=dismiss_inspiration, outputs=inspiration_modal, show_progress="hidden")
189
+
190
+ # 3. Flow Suggestion with Debounce
191
+ def start_debounce(editor_content):
192
+ return {"last_change": time.time(), "active": True}, gr.update(active=True), gr.update(visible=True, value="<progress value='0' max='100'></progress> 补全中... 3.0s")
193
+
194
+ def update_debounce(debounce_state, editor_content):
195
+ if not debounce_state["active"]:
196
+ return gr.update(), gr.update(), debounce_state, gr.update()
197
+ elapsed = time.time() - debounce_state["last_change"]
198
+ if elapsed >= 3:
199
+ suggestion = fetch_flow_suggestion_agent(editor_content)
200
+ return gr.update(visible=False), suggestion, {"last_change": 0, "active": False}, gr.update(active=False)
201
+ else:
202
+ progress = int((elapsed / 3) * 100)
203
+ remaining = 3 - elapsed
204
+ progress_html = f"<progress value='{progress}' max='100'></progress> 补全中... {remaining:.1f}s"
205
+ return gr.update(value=progress_html), gr.update(), debounce_state, gr.update()
206
+
207
+ editor.change(fn=start_debounce, inputs=editor, outputs=[debounce_state, debounce_timer, debounce_progress])
208
+ debounce_timer.tick(fn=update_debounce, inputs=[debounce_state, editor], outputs=[debounce_progress, flow_suggestion_display, debounce_state, debounce_timer])
209
+ refresh_flow_btn.click(fn=fetch_flow_suggestion_agent, inputs=editor, outputs=flow_suggestion_display)
210
+
211
+ # Accept Flow (Triggered by visible Button or hidden Tab Key trigger)
212
  accept_flow_fn_inputs = [editor, flow_suggestion_display]
213
  accept_flow_fn_outputs = [editor]
214
+ accept_flow_btn.click(fn=accept_flow_suggestion_agent, inputs=accept_flow_fn_inputs, outputs=accept_flow_fn_outputs, show_progress="hidden")
 
 
215
 
216
+ # 4. Agent-based Context Updates
217
+ btn_sync_outline.click(
218
+ fn=update_outline_status_agent,
219
+ inputs=[short_outline_input, editor],
220
  outputs=[short_outline_input]
221
  )
222
+ btn_suggest_kb.click(
223
+ fn=suggest_new_kb_terms_agent,
224
+ inputs=[kb_input, editor],
225
+ outputs=[suggested_kb_dataframe, md_suggested_terms_header]
226
+ )