File size: 6,616 Bytes
74ebe5c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63e4846
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74ebe5c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import gradio as gr
import pandas as pd
from model_handler import ModelHandler
from config import LING_1T

def _format_df_to_string(df: pd.DataFrame, title: str) -> str:
    """Formats a pandas DataFrame into a markdown-like string for the prompt."""
    if df is None or df.empty:
        return ""
    
    header = f"### {title}\n"
    rows = []
    for _, row in df.iterrows():
        if 'Done' in df.columns and 'Task' in df.columns:
            status = "[x]" if row['Done'] else "[ ]"
            rows.append(f"- {status} {row['Task']}")
        elif 'Term' in df.columns and 'Description' in df.columns:
            rows.append(f"- **{row['Term']}**: {row['Description']}")
    
    return header + "\n".join(rows) + "\n\n"

def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """
    Agent for fetching three plot-inspiration options from the LLM.

    Args:
        prompt: Optional user instruction; a default continuation request is
            used when empty.
        editor_content: Current editor text; only the last 2000 characters
            are sent to the model.
        style: Global style-charter text.
        kb_df: Knowledge-base DataFrame (Term/Description columns).
        short_outline_df: Current-chapter outline DataFrame.
        long_outline_df: Overall story outline DataFrame.

    Returns:
        (gr.update(visible=True), choice1, choice2, choice3). On failure the
        panel is still shown and the choice slots carry an error message.
    """
    print("\n[Agent][fetch_inspiration_agent] === 推理类型:灵感扩写 ===")
    print("【发出的完整上下文】")
    print("prompt:", repr(prompt))
    print("editor_content:", repr(editor_content))
    print("style:", repr(style))
    print("kb_df:", repr(kb_df.to_dict("records")))
    print("short_outline_df:", repr(short_outline_df.to_dict("records")))
    print("long_outline_df:", repr(long_outline_df.to_dict("records")))

    try:
        # 1. Format context from UI inputs
        style_context = f"### 整体章程\n{style}\n\n"
        kb_context = _format_df_to_string(kb_df, "知识库")
        short_outline_context = _format_df_to_string(short_outline_df, "当前章节大纲")
        long_outline_context = _format_df_to_string(long_outline_df, "故事总纲")

        # 2. Build System Prompt (instructs the model to emit three options
        #    separated by the [END_OF_CHOICE] sentinel).
        system_prompt = (
            "你是一个富有创意的长篇小说家,你的任务是根据提供的背景设定和当前文本,创作三个不同的、有创意的剧情发展方向。\n"
            "请严格遵守以下格式:直接开始写第一个选项,然后用 `[END_OF_CHOICE]` 作为分隔符,接着写第二个选项,再用 `[END_OF_CHOICE]` 分隔,最后写第三个选项。不要有任何额外的解释或编号。\n"
            "例如:\n"
            "剧情发展一的内容...[END_OF_CHOICE]剧情发展二的内容...[END_OF_CHOICE]剧情发展三的内容..."
        )

        # 3. Build User Prompt
        full_context = style_context + kb_context + long_outline_context + short_outline_context
        user_prompt = (
            f"### 背景设定与大纲\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
            f"### 用户指令\n{prompt if prompt else '请基于当前内容,自然地延续剧情。'}"
        )

        # 4. Call LLM
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T
        )

        full_response = "".join(response_generator)

        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))

        # 5. Parse response and update UI.
        # Bug fix: a leading/trailing separator in the model output used to
        # produce blank choices; strip each fragment and drop empties, then
        # pad to exactly three options.
        choices = [c.strip() for c in full_response.split("[END_OF_CHOICE]") if c.strip()]
        choices += ["(模型未生成足够选项)"] * (3 - len(choices))

        print(f"[Agent] LLM Choices Received: {len(choices)}")

        return gr.update(visible=True), choices[0], choices[1], choices[2]

    except Exception as e:
        print(f"[Agent] Error fetching inspiration: {e}")
        error_message = f"获取灵感时出错: {e}"
        return gr.update(visible=True), error_message, "请检查日志", "请检查日志"


def fetch_paragraph_continuation_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """Generate a single continuation paragraph via the LLM (Ribbon UI version).

    Builds the same background context as the other agents (charter, knowledge
    base, story outline, chapter outline), sends the last 2000 characters of
    the editor content, and returns the stripped model output — or an error
    string if the call fails.
    """
    print("\n[Agent][fetch_paragraph_continuation_agent] === 推理类型:整段续写 (Single) ===")
    try:
        # Assemble the background sections in prompt order:
        # charter, knowledge base, story outline, chapter outline.
        background = "".join([
            f"### 整体章程\n{style}\n\n",
            _format_df_to_string(kb_df, "知识库"),
            _format_df_to_string(long_outline_df, "故事总纲"),
            _format_df_to_string(short_outline_df, "当前章节大纲"),
        ])

        system_prompt = (
            "你是一个富有创意的长篇小说家。请根据提供的背景设定和当前文本,自然地续写一段高质量的剧情。\n"
            "请直接输出续写内容,不要包含任何解释、前缀或后缀。"
        )

        instruction = prompt if prompt else '请基于当前内容,自然地延续剧情,写一个完整的段落。'
        user_prompt = (
            f"### 背景设定与大纲\n{background}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
            f"### 用户指令\n{instruction}"
        )

        # Stream the model response and join the chunks into one string.
        handler = ModelHandler()
        chunks = handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T,
        )
        return "".join(chunks).strip()

    except Exception as e:
        print(f"[Agent] Error fetching paragraph continuation: {e}")
        return f"获取续写时出错: {e}"



def apply_inspiration_agent(current_text: str, inspiration_text: str):
    """Append the chosen inspiration to the editor content.

    Returns a tuple for the Gradio event handler:
    (updated editor text, gr.update hiding the choice panel, "" to clear the
    prompt box). The inspiration is joined with a blank line unless the
    editor is currently empty.
    """
    print("\n[Agent][apply_inspiration_agent] === 推理类型:应用灵感 ===")
    print("【发出的完整上下文】")
    print("current_text:", repr(current_text))
    print("inspiration_text:", repr(inspiration_text))
    new_text = inspiration_text if not current_text else f"{current_text}\n\n{inspiration_text}"
    print("【收到的完整上下文】")
    print("new_text:", repr(new_text))
    return new_text, gr.update(visible=False), ""