akhaliq (HF Staff) committed
Commit 78016c6 · 1 Parent(s): 185b4ff
anycoder_app/deploy.py CHANGED
@@ -111,23 +111,30 @@ def generation_code(query: Optional[str], _setting: Dict[str, str], _history: Op
     last_assistant_msg = ""
     if _history and len(_history[-1]) > 1:
         last_assistant_msg = _history[-1][1]
-    # Check for various content types that indicate an existing project
-    if ('<!DOCTYPE html>' in last_assistant_msg or
-        '<html' in last_assistant_msg or
-        'import gradio' in last_assistant_msg or
-        'import streamlit' in last_assistant_msg or
-        'def ' in last_assistant_msg and 'app' in last_assistant_msg or
-        'IMPORTED PROJECT FROM HUGGING FACE SPACE' in last_assistant_msg or
-        'InferenceClient' in last_assistant_msg or  # Inference provider code
-        'from huggingface_hub import' in last_assistant_msg or  # Inference provider code
-        'from transformers import' in last_assistant_msg or  # Transformers code
-        'from diffusers import' in last_assistant_msg or  # Diffusers code
-        '=== index.html ===' in last_assistant_msg or
-        '=== index.js ===' in last_assistant_msg or
-        '=== style.css ===' in last_assistant_msg or
-        '=== app.py ===' in last_assistant_msg or
-        '=== requirements.txt ===' in last_assistant_msg):
-        has_existing_content = True
+
+    # Check if this is imported model code (should NOT be treated as existing content to modify)
+    is_imported_model_code = (
+        "Imported model:" in _history[-1][0] or
+        "Imported inference provider code" in last_assistant_msg or
+        "Imported transformers/diffusers code" in last_assistant_msg or
+        "Switched code type" in _history[-1][0]
+    )
+
+    # Only treat as existing content if it's NOT imported model code
+    if not is_imported_model_code:
+        # Check for various content types that indicate an existing project
+        if ('<!DOCTYPE html>' in last_assistant_msg or
+            '<html' in last_assistant_msg or
+            'import gradio' in last_assistant_msg or
+            'import streamlit' in last_assistant_msg or
+            'def ' in last_assistant_msg and 'app' in last_assistant_msg or
+            'IMPORTED PROJECT FROM HUGGING FACE SPACE' in last_assistant_msg or
+            '=== index.html ===' in last_assistant_msg or
+            '=== index.js ===' in last_assistant_msg or
+            '=== style.css ===' in last_assistant_msg or
+            '=== app.py ===' in last_assistant_msg or
+            '=== requirements.txt ===' in last_assistant_msg):
+            has_existing_content = True
 
     # If this is a modification request, try to apply search/replace first
     if has_existing_content and query.strip():
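The behavioral change in this hunk is easiest to see in isolation: the last history turn is now screened for import markers before the existing-content markers are consulted, and the InferenceClient/transformers/diffusers markers are dropped from the existing-content list. A minimal sketch of the new gate, assuming `_history` is a list of `(user_msg, assistant_msg)` pairs as used above; the helper name `detect_existing_content` and the trimmed marker list are hypothetical, not part of the commit:

```python
# Hypothetical standalone version of the gate added in this hunk.
USER_IMPORT_MARKERS = ("Imported model:", "Switched code type")
ASSISTANT_IMPORT_MARKERS = (
    "Imported inference provider code",
    "Imported transformers/diffusers code",
)

def detect_existing_content(history):
    if not history or len(history[-1]) < 2:
        return False
    user_msg, assistant_msg = history[-1][0], history[-1][1]
    # Imported model code is a starting point for generation,
    # not an existing project to patch with search/replace blocks.
    if (any(m in user_msg for m in USER_IMPORT_MARKERS)
            or any(m in assistant_msg for m in ASSISTANT_IMPORT_MARKERS)):
        return False
    project_markers = ('<!DOCTYPE html>', '<html', 'import gradio',
                       'import streamlit', '=== index.html ===', '=== app.py ===')
    return any(m in assistant_msg for m in project_markers)

# Imported inference code no longer counts as modifiable existing content:
assert not detect_existing_content(
    [("Imported model: some-model", "from huggingface_hub import InferenceClient")])
# A previously generated app still does:
assert detect_existing_content(
    [("build a demo", "=== app.py ===\nimport gradio as gr")])
```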
 
@@ -284,32 +291,8 @@ Generate the exact search/replace blocks needed to make these changes."""
 
     messages = history_to_messages(_history, system_prompt)
 
-    # Check if user has imported inference provider or model code and enhance the query
-    has_imported_model_code = False
-    imported_model_info = ""
-
-    if _history:
-        for user_msg, assistant_msg in _history:
-            # Check if this is an imported model
-            if "Imported model:" in user_msg or "Imported inference provider code" in assistant_msg:
-                has_imported_model_code = True
-                # Extract the model code from assistant message
-                if "InferenceClient" in assistant_msg or "from huggingface_hub import" in assistant_msg:
-                    # Provide specific instructions based on the type of app being created
-                    if language == "gradio":
-                        imported_model_info = f"\n\n**CRITICAL INSTRUCTION: The user has imported HuggingFace Inference Provider code in the previous message. You MUST use this code as the backend for the Gradio application. Create a Gradio interface that:**\n1. Uses the InferenceClient from the imported code\n2. Creates a chatbot interface with gr.ChatInterface or gr.Blocks\n3. Implements a function that calls the model using client.chat.completions.create()\n4. Handles streaming responses properly\n5. Includes proper error handling\n\n**DO NOT ignore the imported code - integrate it into your Gradio app!**"
-                    else:
-                        imported_model_info = f"\n\n**IMPORTANT: The user has already imported model inference code in the conversation history. When creating the {language} application, USE the imported inference code as the backend. Integrate it properly into your application.**"
-                    break
-            elif "from transformers import" in assistant_msg or "from diffusers import" in assistant_msg:
-                if language == "gradio":
-                    imported_model_info = f"\n\n**CRITICAL INSTRUCTION: The user has imported transformers/diffusers model code in the previous message. You MUST use this code as the backend for the Gradio application. Create a Gradio interface that:**\n1. Uses the model loading code from the imported code\n2. Creates an appropriate interface based on the model type\n3. Implements inference functions that use the imported model\n4. Includes proper error handling\n\n**DO NOT ignore the imported code - integrate it into your Gradio app!**"
-                else:
-                    imported_model_info = f"\n\n**IMPORTANT: The user has already imported transformers/diffusers model code in the conversation history. When creating the {language} application, USE the imported model code as the backend. Integrate it properly into your application.**"
-                break
-
-    # Use the original query, enhanced with context about imported code if applicable
-    enhanced_query = query + imported_model_info
+    # Use the original query without any enhancements - let the system prompt handle everything
+    enhanced_query = query
 
     # Check if this is GLM-4.5 model and handle with simple HuggingFace InferenceClient
     if _current_model["id"] == "zai-org/GLM-4.5":
anycoder_app/docs_manager.py CHANGED
@@ -383,6 +383,66 @@ def update_gradio_system_prompts():
     - Generate ONLY the requested code files and requirements.txt
     - No explanatory text outside the code blocks
 
+    ## 🎯 Working with Imported Model Code
+
+    **CRITICAL: If the user has imported model code in the conversation history (InferenceClient, transformers, diffusers), you MUST integrate it into your Gradio application!**
+
+    **For InferenceClient Code (HuggingFace Inference API):**
+    - DO NOT just copy the standalone inference code
+    - Create a complete Gradio application that wraps the inference code
+    - Use `gr.ChatInterface()` for chat models or an appropriate interface for other tasks
+    - Extract the model name from the imported code
+    - Implement proper streaming if the model supports it
+    - Handle conversation history correctly
+
+    **Example Structure for Chatbot:**
+    ```python
+    import gradio as gr
+    import os
+    from huggingface_hub import InferenceClient
+
+    # Use the InferenceClient configuration from imported code
+    client = InferenceClient(api_key=os.environ["HF_TOKEN"])
+
+    def respond(message, history):
+        # Build messages from history
+        messages = [{"role": "system", "content": "You are a helpful assistant."}]
+        for user_msg, assistant_msg in history:
+            messages.append({"role": "user", "content": user_msg})
+            messages.append({"role": "assistant", "content": assistant_msg})
+        messages.append({"role": "user", "content": message})
+
+        # Call the model (use model name from imported code)
+        response = ""
+        for chunk in client.chat.completions.create(
+            model="MODEL_NAME_FROM_IMPORTED_CODE",
+            messages=messages,
+            stream=True,
+            max_tokens=1024,
+        ):
+            if chunk.choices[0].delta.content:
+                response += chunk.choices[0].delta.content
+                yield response
+
+    demo = gr.ChatInterface(respond, title="Chatbot", description="Chat with the model")
+    demo.launch()
+    ```
+
+    **For Transformers/Diffusers Code:**
+    - Extract model loading and inference logic
+    - Wrap it in an appropriate Gradio interface
+    - For chat models: use gr.ChatInterface
+    - For image generation: use gr.Interface with image output
+    - For other tasks: choose an appropriate interface type
+    - Include proper error handling and loading states
+
+    **Key Requirements:**
+    1. ✅ ALWAYS create a complete Gradio application, not just inference code
+    2. ✅ Extract model configuration from imported code
+    3. ✅ Use an appropriate Gradio interface for the task
+    4. ✅ Include demo.launch() at the end
+    5. ✅ Add requirements.txt with necessary dependencies
+
     ## Multi-File Application Structure
 
     When creating complex Gradio applications, organize your code into multiple files for better maintainability:
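The prompt gives a worked example for the InferenceClient path but only bullets for the transformers/diffusers path. For reference, a minimal sketch of the kind of app those bullets describe: a text-generation pipeline wrapped in gr.Interface with basic error handling. The model id and task are illustrative, not taken from the commit:

```python
# Illustrative sketch of the transformers path described in the prompt above:
# wrap a text-generation pipeline in a Gradio interface. The model id and
# task are hypothetical examples, not taken from the commit.
import gradio as gr
from transformers import pipeline

generator = pipeline("text-generation", model="distilgpt2")

def generate(prompt):
    try:
        outputs = generator(prompt, max_new_tokens=64)
        return outputs[0]["generated_text"]
    except Exception as e:
        # Surface errors in the UI instead of crashing the app
        return f"Error: {e}"

demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Completion"),
    title="Text Generation Demo",
)
demo.launch()
```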
 
@@ -1286,8 +1346,14 @@ This reference is automatically synced from https://fastrtc.org/llms.txt to ensu
     search_prompt += fastrtc_section
 
     # Update the prompts in the prompts module
-    prompts.GRADIO_SYSTEM_PROMPT = base_prompt + docs_content + "\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns.\n\nIMPORTANT: Always include \"Built with anycoder\" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"
-    prompts.GRADIO_SYSTEM_PROMPT_WITH_SEARCH = search_prompt + docs_content + "\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns.\n\nIMPORTANT: Always include \"Built with anycoder\" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"
+    final_instructions = """\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns.
+
+🔍 BEFORE GENERATING: Review the conversation history carefully. If the user has imported any model code (InferenceClient, transformers, diffusers), you MUST integrate that code into your Gradio application. Do not generate standalone inference code - create a complete Gradio app that wraps the imported model functionality.
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+    prompts.GRADIO_SYSTEM_PROMPT = base_prompt + docs_content + final_instructions
+    prompts.GRADIO_SYSTEM_PROMPT_WITH_SEARCH = search_prompt + docs_content + final_instructions
 
 def update_json_system_prompts():
     """Update the global JSON system prompts with latest ComfyUI documentation"""