akhaliq (HF Staff) committed
Commit 0a3c558
1 Parent(s): e9d0363

add agent mode

Files changed (2)
  1. anycoder_app/agent.py +234 -0
  2. anycoder_app/ui.py +120 -21
anycoder_app/agent.py ADDED
@@ -0,0 +1,234 @@
+"""
+Agent functionality for interactive code generation with follow-up questions and task planning.
+"""
+import os
+from typing import Dict, List, Optional, Tuple, Generator
+import gradio as gr
+
+from .models import (
+    get_inference_client, get_real_model_id, history_to_messages,
+    history_to_chatbot_messages, strip_thinking_tags
+)
+from .deploy import generation_code
+
+
+def agent_generate_with_questions(
+    query: Optional[str],
+    setting: Dict[str, str],
+    history: List,
+    current_model: Dict,
+    language: str,
+    provider: str,
+    profile: Optional[gr.OAuthProfile] = None,
+    token: Optional[gr.OAuthToken] = None,
+    max_questions: int = 3
+) -> Generator[Tuple[List, List], None, None]:
+    """
+    Agent that asks follow-up questions, creates a task list, and generates code.
+
+    Args:
+        query: Initial user request
+        setting: System settings
+        history: Conversation history
+        current_model: Selected model configuration
+        language: Target programming language/framework
+        provider: Model provider
+        profile: User OAuth profile
+        token: User OAuth token
+        max_questions: Maximum number of follow-up questions to ask
+
+    Yields:
+        Tuples of (history, chatbot_messages) at each step
+    """
+    if not query or not query.strip():
+        return
+
+    # Initialize history with user's initial query
+    current_history = history + [[query, ""]]
+
+    # Step 1: Agent analyzes the request and asks follow-up questions
+    agent_system_prompt = """You are a helpful coding assistant that helps users clarify their requirements before generating code.
+
+Your task is to:
+1. Analyze the user's request
+2. Ask 1-3 clarifying questions to better understand their needs
+3. Focus on important details like:
+   - Target audience and use case
+   - Specific features or functionality needed
+   - Design preferences (colors, layout, style)
+   - Data sources or APIs to integrate
+   - Performance or scalability requirements
+
+Output ONLY the questions, numbered 1, 2, 3, etc. Keep questions concise and focused.
+Do not generate code yet - just ask the questions."""
+
+    # Get LLM client
+    client = get_inference_client(current_model.get('model_id', 'Qwen/Qwen2.5-Coder-32B-Instruct'), provider)
+    model_id = get_real_model_id(current_model.get('model_id', 'Qwen/Qwen2.5-Coder-32B-Instruct'))
+
+    # Prepare messages for follow-up questions
+    messages = [
+        {'role': 'system', 'content': agent_system_prompt},
+        {'role': 'user', 'content': f"User wants to create: {query}\n\nLanguage/Framework: {language}\n\nAsk clarifying questions."}
+    ]
+
+    # Generate follow-up questions
+    questions_response = ""
+    try:
+        # Try to use the client (works for both InferenceClient and OpenAI-compatible clients)
+        stream = client.chat.completions.create(
+            model=model_id,
+            messages=messages,
+            temperature=0.7,
+            max_tokens=500,
+            stream=True
+        )
+        for chunk in stream:
+            if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
+                questions_response += chunk.choices[0].delta.content
+                # Update display in real-time
+                temp_history = current_history[:-1] + [[query, f"🤔 **Analyzing your request...**\n\n{questions_response}"]]
+                yield (temp_history, history_to_chatbot_messages(temp_history))
+    except Exception as e:
+        error_msg = f"❌ Error asking follow-up questions: {str(e)}"
+        temp_history = current_history[:-1] + [[query, error_msg]]
+        yield (temp_history, history_to_chatbot_messages(temp_history))
+        return
+
+    # Update history with agent's questions
+    current_history[-1][1] = f"🤔 **Let me ask you a few questions to better understand your needs:**\n\n{questions_response}\n\n💬 Please answer these questions in your next message."
+    yield (current_history, history_to_chatbot_messages(current_history))
+
+    # Wait for user response (this will be handled by the UI)
+    # For now, we'll return and let the user respond, then continue in the next call
+    return
+
+
+def agent_process_answers_and_generate(
+    user_answers: str,
+    original_query: str,
+    questions: str,
+    setting: Dict[str, str],
+    history: List,
+    current_model: Dict,
+    language: str,
+    provider: str,
+    profile: Optional[gr.OAuthProfile] = None,
+    token: Optional[gr.OAuthToken] = None,
+    code_output=None,
+    history_output=None,
+    history_state=None
+) -> Generator:
+    """
+    Process user's answers, create task list, and generate code.
+
+    Args:
+        user_answers: User's responses to the questions
+        original_query: Original user request
+        questions: Agent's questions
+        setting: System settings
+        history: Conversation history
+        current_model: Selected model configuration
+        language: Target programming language/framework
+        provider: Model provider
+        profile: User OAuth profile
+        token: User OAuth token
+        code_output: Code output component
+        history_output: History output component
+        history_state: History state
+
+    Yields:
+        Updates to code output and history
+    """
+    # Step 2: Create task list based on answers
+    task_planning_prompt = f"""Based on the user's request and their answers, create a detailed task list for implementing the solution.
+
+Original Request: {original_query}
+
+Questions Asked:
+{questions}
+
+User's Answers:
+{user_answers}
+
+Create a numbered task list with 5-8 specific, actionable tasks. Each task should be clear and focused.
+Start with "📋 **Task List:**" and then list the tasks."""
+
+    client = get_inference_client(current_model.get('model_id', 'Qwen/Qwen2.5-Coder-32B-Instruct'), provider)
+    model_id = get_real_model_id(current_model.get('model_id', 'Qwen/Qwen2.5-Coder-32B-Instruct'))
+
+    messages = [
+        {'role': 'system', 'content': 'You are a helpful coding assistant creating a task plan.'},
+        {'role': 'user', 'content': task_planning_prompt}
+    ]
+
+    # Generate task list
+    task_list = ""
+    try:
+        stream = client.chat.completions.create(
+            model=model_id,
+            messages=messages,
+            temperature=0.7,
+            max_tokens=800,
+            stream=True
+        )
+        for chunk in stream:
+            if hasattr(chunk.choices[0].delta, 'content') and chunk.choices[0].delta.content:
+                task_list += chunk.choices[0].delta.content
+                # Update display
+                temp_history = history + [[user_answers, f"📋 **Creating task list...**\n\n{task_list}"]]
+                yield {
+                    history_state: temp_history,
+                    history_output: history_to_chatbot_messages(temp_history)
+                }
+    except Exception as e:
+        error_msg = f"❌ Error creating task list: {str(e)}"
+        temp_history = history + [[user_answers, error_msg]]
+        yield {
+            history_state: temp_history,
+            history_output: history_to_chatbot_messages(temp_history)
+        }
+        return
+
+    # Update history with task list
+    updated_history = history + [[user_answers, task_list]]
+    yield {
+        history_state: updated_history,
+        history_output: history_to_chatbot_messages(updated_history)
+    }
+
+    # Step 3: Generate code based on refined requirements
+    refined_query = f"""{original_query}
+
+Additional Requirements (based on follow-up):
+{user_answers}
+
+Task List:
+{task_list}
+
+Please implement the above requirements following the task list."""
+
+    # Add a message indicating code generation is starting
+    code_gen_start_history = updated_history + [["[System]", "🚀 **Starting code generation based on your requirements...**"]]
+    yield {
+        history_state: code_gen_start_history,
+        history_output: history_to_chatbot_messages(code_gen_start_history)
+    }
+
+    # Use the existing generation_code function for actual code generation
+    # We need to pass the refined query and updated history
+    for result in generation_code(
+        refined_query,
+        setting,
+        updated_history,
+        current_model,
+        language,
+        provider,
+        profile,
+        token,
+        code_output,
+        history_output,
+        history_state
+    ):
+        yield result
+
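For orientation, a minimal sketch (not part of the commit) of how the first phase could be driven directly in Python. The model dict shape is inferred from the current_model.get('model_id', ...) calls above; the query, setting, language, and provider values are illustrative assumptions, not values taken from this code.

from anycoder_app.agent import agent_generate_with_questions

current_model = {"model_id": "Qwen/Qwen2.5-Coder-32B-Instruct"}  # assumed config shape
history = []  # list of [user_message, assistant_message] pairs

final_history = None
for hist, chatbot_msgs in agent_generate_with_questions(
    query="Build a personal portfolio site",  # example request
    setting={},                               # settings dict, forwarded later to generation_code
    history=history,
    current_model=current_model,
    language="html",                          # assumed language/framework string
    provider="auto",                          # assumed provider name
):
    final_history = hist  # each yield streams the questions as they are produced

# The last assistant turn holds the clarifying questions; the UI stores it in
# agent_conversation_state["questions"] and waits for the user's answers.
if final_history:
    print(final_history[-1][1])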
anycoder_app/ui.py CHANGED
@@ -29,6 +29,9 @@ from .deploy import (
     generate_requirements_txt_with_llm, prettify_comfyui_json_for_html,
     get_trending_models, import_model_from_hf, get_trending_spaces, import_space_from_hf
 )
+from .agent import (
+    agent_generate_with_questions, agent_process_answers_and_generate
+)
 
 # Main application with proper Gradio theming
 with gr.Blocks(
@@ -91,6 +94,12 @@
     last_login_state = gr.State(None)
     models_first_change = gr.State(True)
     spaces_first_change = gr.State(True)
+    agent_mode_enabled = gr.State(False)
+    agent_conversation_state = gr.State({
+        "stage": "initial",  # initial, waiting_for_answers, generating
+        "original_query": "",
+        "questions": ""
+    })
 
     with gr.Sidebar() as sidebar:
         login_button = gr.LoginButton()
@@ -166,6 +175,15 @@
                 label="Code Language",
                 visible=True
             )
+
+            # Agent mode checkbox
+            agent_mode_checkbox = gr.Checkbox(
+                label="🤖 Enable Agent Mode",
+                value=False,
+                info="Agent will ask follow-up questions and create a task list before coding",
+                visible=True
+            )
+
             # Removed image generation components
             with gr.Row():
                 btn = gr.Button("Generate", variant="secondary", size="lg", scale=2, visible=True, interactive=False)
@@ -861,35 +879,107 @@
 
 
 
-    def begin_generation_ui():
-        # Collapse the sidebar when generation starts; keep status hidden
-        return [gr.update(open=False), gr.update(visible=False)]
+    def begin_generation_ui(agent_enabled):
+        # In agent mode, keep sidebar open during question/task planning phase
+        # Only close it when actual code generation starts
+        if agent_enabled:
+            return [gr.update(), gr.update(visible=False)]  # Keep sidebar as-is
+        else:
+            # Normal mode: collapse sidebar immediately
+            return [gr.update(open=False), gr.update(visible=False)]
 
     def end_generation_ui():
         # Open sidebar after generation; hide the status
         return [gr.update(open=True), gr.update(visible=False)]
+
+    def close_sidebar_for_coding():
+        # Close sidebar when transitioning to actual code generation
+        return gr.update(open=False)
 
-    def generation_code_wrapper(inp, sett, hist, model, lang, prov, profile: Optional[gr.OAuthProfile] = None, token: Optional[gr.OAuthToken] = None):
-        """Wrapper to call generation_code and pass component references"""
-        # Generate code and update both history and chat_history
-        for result in generation_code(inp, sett, hist, model, lang, prov, profile, token, code_output, history_output, history):
-            # generation_code yields dictionaries with component keys
-            # Extract the values and yield them for our outputs
-            code_val = result.get(code_output, "")
-            hist_val = result.get(history, hist)
-            history_output_val = result.get(history_output, [])
-            # Yield for: code_output, history, history_output, chat_history
-            yield code_val, hist_val, history_output_val, history_output_val
-
+    def generation_code_wrapper(inp, sett, hist, model, lang, prov, agent_enabled, agent_state, profile: Optional[gr.OAuthProfile] = None, token: Optional[gr.OAuthToken] = None):
+        """Wrapper to call generation_code or agent mode based on settings"""
+
+        # Check if agent mode is enabled
+        if agent_enabled and agent_state["stage"] == "initial":
+            # Agent mode - first interaction, ask questions
+            # Sidebar stays open during this phase
+            for updated_hist, chatbot_msgs in agent_generate_with_questions(
+                inp, sett, hist, model, lang, prov, profile, token
+            ):
+                # Update agent state to track that we're waiting for answers
+                new_agent_state = {
+                    "stage": "waiting_for_answers",
+                    "original_query": inp,
+                    "questions": updated_hist[-1][1] if updated_hist else ""
+                }
+                # Yield: code_output, history, history_output, chat_history, agent_conversation_state, sidebar
+                yield "", updated_hist, chatbot_msgs, chatbot_msgs, new_agent_state, gr.update()
+            return
+
+        elif agent_enabled and agent_state["stage"] == "waiting_for_answers":
+            # Agent mode - user has answered questions, now create task list and generate code
+            original_query = agent_state.get("original_query", "")
+            questions = agent_state.get("questions", "")
+
+            # Track whether we've started code generation to close sidebar
+            started_code_generation = False
+
+            # Process answers and generate code
+            for result in agent_process_answers_and_generate(
+                inp, original_query, questions, sett, hist, model, lang, prov,
+                profile, token, code_output, history_output, history
+            ):
+                # Extract values from result dict
+                code_val = result.get(code_output, "")
+                hist_val = result.get(history, hist)
+                history_output_val = result.get(history_output, [])
+
+                # Reset agent state after generation
+                reset_agent_state = {
+                    "stage": "initial",
+                    "original_query": "",
+                    "questions": ""
+                }
+
+                # Close sidebar when we start generating code (when code_output has content)
+                if code_val and not started_code_generation:
+                    sidebar_update = gr.update(open=False)
+                    started_code_generation = True
+                else:
+                    sidebar_update = gr.update()
+
+                # Yield: code_output, history, history_output, chat_history, agent_conversation_state, sidebar
+                yield code_val, hist_val, history_output_val, history_output_val, reset_agent_state, sidebar_update
+            return
+
+        else:
+            # Normal mode - direct code generation
+            # Sidebar was already closed by begin_generation_ui
+            for result in generation_code(inp, sett, hist, model, lang, prov, profile, token, code_output, history_output, history):
+                # generation_code yields dictionaries with component keys
+                # Extract the values and yield them for our outputs
+                code_val = result.get(code_output, "")
+                hist_val = result.get(history, hist)
+                history_output_val = result.get(history_output, [])
+                # Yield for: code_output, history, history_output, chat_history, agent_conversation_state, sidebar
+                yield code_val, hist_val, history_output_val, history_output_val, agent_state, gr.update()
+
+    # Update agent_mode_enabled state when checkbox changes
+    agent_mode_checkbox.change(
+        lambda enabled: enabled,
+        inputs=[agent_mode_checkbox],
+        outputs=[agent_mode_enabled]
+    )
+
     btn.click(
         begin_generation_ui,
-        inputs=None,
+        inputs=[agent_mode_enabled],
         outputs=[sidebar, generating_status],
         show_progress="hidden",
     ).then(
         generation_code_wrapper,
-        inputs=[input, setting, history, current_model, language_dropdown, provider_state],
-        outputs=[code_output, history, history_output, chat_history]
+        inputs=[input, setting, history, current_model, language_dropdown, provider_state, agent_mode_enabled, agent_conversation_state],
+        outputs=[code_output, history, history_output, chat_history, agent_conversation_state, sidebar]
     ).then(
         end_generation_ui,
        inputs=None,
@@ -931,13 +1021,13 @@
     # Pressing Enter in the main input should trigger generation and collapse the sidebar
     input.submit(
         begin_generation_ui,
-        inputs=None,
+        inputs=[agent_mode_enabled],
         outputs=[sidebar, generating_status],
         show_progress="hidden",
     ).then(
         generation_code_wrapper,
-        inputs=[input, setting, history, current_model, language_dropdown, provider_state],
-        outputs=[code_output, history, history_output, chat_history]
+        inputs=[input, setting, history, current_model, language_dropdown, provider_state, agent_mode_enabled, agent_conversation_state],
+        outputs=[code_output, history, history_output, chat_history, agent_conversation_state, sidebar]
     ).then(
         end_generation_ui,
         inputs=None,
@@ -1001,8 +1091,17 @@
     </div>
     """
 
+    def reset_agent_state():
+        """Reset agent conversation state when clearing history"""
+        return {
+            "stage": "initial",
+            "original_query": "",
+            "questions": ""
+        }
+
     clear_btn.click(clear_history, outputs=[history, history_output, chat_history])
     clear_btn.click(hide_deploy_components, None, [deploy_btn])
+    clear_btn.click(reset_agent_state, outputs=[agent_conversation_state])
     # Reset button text when clearing
     clear_btn.click(
         lambda: gr.update(value="Publish"),