# ling-series-spaces/smart_writer_kit/agent_for_inspiration_expansion.py
import gradio as gr
import pandas as pd
from model_handler import ModelHandler
from config import LING_1T
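
# Note on external interfaces: ModelHandler.generate_code is used below as a
# generator that yields text chunks for a (system_prompt, user_prompt, model_choice)
# call, and LING_1T is the model identifier from config.py. Both are project-local;
# their exact signatures are inferred from the call sites in this file.
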
def _format_df_to_string(df: pd.DataFrame, title: str) -> str:
    """Formats a pandas DataFrame into a markdown-like string for the prompt."""
    if df is None or df.empty:
        return ""
    header = f"### {title}\n"
    rows = []
    for _, row in df.iterrows():
        if 'Done' in df.columns and 'Task' in df.columns:
            status = "[x]" if row['Done'] else "[ ]"
            rows.append(f"- {status} {row['Task']}")
        elif 'Term' in df.columns and 'Description' in df.columns:
            rows.append(f"- **{row['Term']}**: {row['Description']}")
    return header + "\n".join(rows) + "\n\n"
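
# Expected DataFrame shapes (a sketch; the column names are taken from the checks
# above, while the actual schemas come from the Gradio tables elsewhere in the app):
#
#   outline_df = pd.DataFrame([{"Task": "Introduce the antagonist", "Done": True}])
#   kb_df = pd.DataFrame([{"Term": "Star Ring", "Description": "An ancient relic orbiting the planet"}])
#
#   _format_df_to_string(outline_df, "当前章节大纲")
#   # -> "### 当前章节大纲\n- [x] Introduce the antagonist\n\n"
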
def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """
    Agent for fetching inspiration options using a real LLM.
    """
    print("\n[Agent][fetch_inspiration_agent] === 推理类型:灵感扩写 ===")
    print("【发出的完整上下文】")
    print("prompt:", repr(prompt))
    print("editor_content:", repr(editor_content))
    print("style:", repr(style))
    print("kb_df:", repr(kb_df.to_dict("records")))
    print("short_outline_df:", repr(short_outline_df.to_dict("records")))
    print("long_outline_df:", repr(long_outline_df.to_dict("records")))
    try:
        # 1. Format context from UI inputs
        style_context = f"### 整体章程\n{style}\n\n"
        kb_context = _format_df_to_string(kb_df, "知识库")
        short_outline_context = _format_df_to_string(short_outline_df, "当前章节大纲")
        long_outline_context = _format_df_to_string(long_outline_df, "故事总纲")

        # 2. Build System Prompt
        system_prompt = (
            "你是一个富有创意的长篇小说家,你的任务是根据提供的背景设定和当前文本,创作三个不同的、有创意的剧情发展方向。\n"
            "请严格遵守以下格式:直接开始写第一个选项,然后用 `[END_OF_CHOICE]` 作为分隔符,接着写第二个选项,再用 `[END_OF_CHOICE]` 分隔,最后写第三个选项。不要有任何额外的解释或编号。\n"
            "例如:\n"
            "剧情发展一的内容...[END_OF_CHOICE]剧情发展二的内容...[END_OF_CHOICE]剧情发展三的内容..."
        )

        # 3. Build User Prompt
        full_context = style_context + kb_context + long_outline_context + short_outline_context
        user_prompt = (
            f"### 背景设定与大纲\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
            f"### 用户指令\n{prompt if prompt else '请基于当前内容,自然地延续剧情。'}"
        )

        # 4. Call LLM
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T
        )
        full_response = "".join(chunk for chunk in response_generator)
        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))

        # 5. Parse response and update UI
        choices = full_response.split("[END_OF_CHOICE]")
        # Ensure we have at least 3 choices, padding with placeholders if necessary
        choices += ["(模型未生成足够选项)"] * (3 - len(choices))
        print(f"[Agent] LLM Choices Received: {len(choices)}")
        return gr.update(visible=True), choices[0].strip(), choices[1].strip(), choices[2].strip()
    except Exception as e:
        print(f"[Agent] Error fetching inspiration: {e}")
        error_message = f"获取灵感时出错: {e}"
        return gr.update(visible=True), error_message, "请检查日志", "请检查日志"
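
# Wiring sketch (assumption): the tuple returned above matches the Gradio outputs in
# the order (options panel visibility, choice 1, choice 2, choice 3). Component names
# such as `inspiration_group` and `choice_1` are hypothetical and belong to the app's
# UI module, not this file:
#
#   fetch_btn.click(
#       fetch_inspiration_agent,
#       inputs=[prompt_box, editor, style_box, kb_table, short_outline_table, long_outline_table],
#       outputs=[inspiration_group, choice_1, choice_2, choice_3],
#   )
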
def fetch_paragraph_continuation_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """
    Agent for fetching a single paragraph continuation (Ribbon UI version).
    """
    print("\n[Agent][fetch_paragraph_continuation_agent] === 推理类型:整段续写 (Single) ===")
    try:
        # 1. Format context
        style_context = f"### 整体章程\n{style}\n\n"
        kb_context = _format_df_to_string(kb_df, "知识库")
        short_outline_context = _format_df_to_string(short_outline_df, "当前章节大纲")
        long_outline_context = _format_df_to_string(long_outline_df, "故事总纲")

        # 2. Build System Prompt
        system_prompt = (
            "你是一个富有创意的长篇小说家。请根据提供的背景设定和当前文本,自然地续写一段高质量的剧情。\n"
            "请直接输出续写内容,不要包含任何解释、前缀或后缀。"
        )

        # 3. Build User Prompt
        full_context = style_context + kb_context + long_outline_context + short_outline_context
        user_prompt = (
            f"### 背景设定与大纲\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
            f"### 用户指令\n{prompt if prompt else '请基于当前内容,自然地延续剧情,写一个完整的段落。'}"
        )

        # 4. Call LLM
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T
        )
        full_response = "".join(chunk for chunk in response_generator)
        return full_response.strip()
    except Exception as e:
        print(f"[Agent] Error fetching paragraph continuation: {e}")
        return f"获取续写时出错: {e}"
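
# --- Hypothetical streaming variant (illustrative sketch, not used by the app) ------
# Assuming the same generator interface of ModelHandler.generate_code as above, a
# handler could stream the continuation into the editor instead of waiting for the
# full response. The function name and yielding behaviour here are assumptions.
def _stream_paragraph_continuation_sketch(system_prompt: str, user_prompt: str):
    model_handler = ModelHandler()
    partial = ""
    for chunk in model_handler.generate_code(
        system_prompt=system_prompt,
        user_prompt=user_prompt,
        model_choice=LING_1T,
    ):
        partial += chunk
        # A Gradio event handler that yields gets progressive updates in the UI.
        yield partial
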
def apply_inspiration_agent(current_text: str, inspiration_text: str):
    """
    Agent for applying selected inspiration to the editor.
    """
    print("\n[Agent][apply_inspiration_agent] === 推理类型:应用灵感 ===")
    print("【发出的完整上下文】")
    print("current_text:", repr(current_text))
    print("inspiration_text:", repr(inspiration_text))
    if not current_text:
        new_text = inspiration_text
    else:
        new_text = current_text + "\n\n" + inspiration_text
    print("【收到的完整上下文】")
    print("new_text:", repr(new_text))
    # Return a tuple that unpacks into the outputs for the Gradio event handler
    return new_text, gr.update(visible=False), ""
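

if __name__ == "__main__":
    # Minimal offline smoke test (illustrative only; the real app calls these agents
    # from Gradio event handlers). Only the pure helpers run here, so no LLM call is made.
    demo_outline = pd.DataFrame([{"Task": "Introduce the antagonist", "Done": False}])
    print(_format_df_to_string(demo_outline, "当前章节大纲"))

    merged, hide_panel, cleared_prompt = apply_inspiration_agent(
        "First paragraph of the draft.", "A new twist appears at the door."
    )
    print(repr(merged), hide_panel, repr(cleared_prompt))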