khairul91 commited on
Commit
32b3632
·
verified ·
1 Parent(s): ddf45da

Delete backend_api.py

Browse files
Files changed (1) hide show
  1. backend_api.py +0 -805
backend_api.py DELETED
@@ -1,805 +0,0 @@
1
- """
2
- FastAPI backend for AnyCoder - provides REST API endpoints
3
- """
4
- from fastapi import FastAPI, HTTPException, Header, WebSocket, WebSocketDisconnect, Request, Response
5
- from fastapi.middleware.cors import CORSMiddleware
6
- from fastapi.responses import StreamingResponse, RedirectResponse, JSONResponse
7
- from pydantic import BaseModel
8
- from typing import Optional, List, Dict, AsyncGenerator
9
- import json
10
- import asyncio
11
- from datetime import datetime
12
- import secrets
13
- import base64
14
- import urllib.parse
15
-
16
- # Import only what we need, avoiding Gradio UI imports
17
- import sys
18
- import os
19
- from huggingface_hub import InferenceClient
20
- import httpx
21
-
22
- # Import model handling from backend_models
23
- from backend_models import (
24
- get_inference_client,
25
- get_real_model_id,
26
- create_gemini3_messages,
27
- is_native_sdk_model,
28
- is_mistral_model
29
- )
30
-
31
- # Import project importer for importing from HF/GitHub
32
- from project_importer import ProjectImporter
33
-
34
# Import system prompts from standalone backend_prompts.py
# No dependencies on Gradio or heavy libraries
print("[Startup] Loading system prompts from backend_prompts...")

try:
    from backend_prompts import (
        HTML_SYSTEM_PROMPT,
        TRANSFORMERS_JS_SYSTEM_PROMPT,
        STREAMLIT_SYSTEM_PROMPT,
        REACT_SYSTEM_PROMPT,
        GRADIO_SYSTEM_PROMPT,
        JSON_SYSTEM_PROMPT,
        GENERIC_SYSTEM_PROMPT
    )
    print("[Startup] ✅ All system prompts loaded successfully from backend_prompts.py")
except Exception as e:
    # backend_prompts is optional at runtime: if it is missing or broken we log
    # the failure and fall back to short built-in prompts so the API still serves.
    import traceback
    print(f"[Startup] ❌ ERROR: Could not import from backend_prompts: {e}")
    print(f"[Startup] Traceback: {traceback.format_exc()}")
    print("[Startup] Using minimal fallback prompts")

    # Define minimal fallback prompts
    HTML_SYSTEM_PROMPT = "You are an expert web developer. Create complete HTML applications with CSS and JavaScript."
    TRANSFORMERS_JS_SYSTEM_PROMPT = "You are an expert at creating transformers.js applications. Generate complete working code."
    STREAMLIT_SYSTEM_PROMPT = "You are an expert Streamlit developer. Create complete Streamlit applications."
    REACT_SYSTEM_PROMPT = "You are an expert React developer. Create complete React applications with Next.js."
    GRADIO_SYSTEM_PROMPT = "You are an expert Gradio developer. Create complete, working Gradio applications."
    JSON_SYSTEM_PROMPT = "You are an expert at generating JSON configurations. Create valid, well-structured JSON."
    # GENERIC_SYSTEM_PROMPT is a template: callers .format(language=...) it.
    GENERIC_SYSTEM_PROMPT = "You are an expert {language} developer. Create complete, working {language} applications."

print("[Startup] System prompts initialization complete")
65
-
66
# Define models and languages here to avoid importing Gradio UI.
# NOTE: AVAILABLE_MODELS[0] is the fallback when an unknown model id is requested.
AVAILABLE_MODELS = [
    {"name": "Gemini 3.0 Pro", "id": "gemini-3.0-pro", "description": "Google Gemini 3.0 Pro via Poe with advanced reasoning"},
    {"name": "Sherlock Dash Alpha", "id": "openrouter/sherlock-dash-alpha", "description": "Sherlock Dash Alpha model via OpenRouter"},
    {"name": "MiniMax M2", "id": "MiniMaxAI/MiniMax-M2", "description": "MiniMax M2 model via HuggingFace InferenceClient with Novita provider"},
    {"name": "DeepSeek V3.2-Exp", "id": "deepseek-ai/DeepSeek-V3.2-Exp", "description": "DeepSeek V3.2 Experimental via HuggingFace"},
    {"name": "DeepSeek R1", "id": "deepseek-ai/DeepSeek-R1-0528", "description": "DeepSeek R1 model for code generation"},
    {"name": "GPT-5", "id": "gpt-5", "description": "OpenAI GPT-5 via OpenRouter"},
    {"name": "Gemini Flash Latest", "id": "gemini-flash-latest", "description": "Google Gemini Flash via OpenRouter"},
    {"name": "Qwen3 Max Preview", "id": "qwen3-max-preview", "description": "Qwen3 Max Preview via DashScope API"},
]

# Target frameworks the generator knows how to prompt for.
LANGUAGE_CHOICES = ["html", "gradio", "transformers.js", "streamlit", "comfyui", "react"]
79
-
80
app = FastAPI(title="AnyCoder API", version="1.0.0")

# OAuth and environment configuration (must be before CORS)
OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID", "")
OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET", "")
OAUTH_SCOPES = os.getenv("OAUTH_SCOPES", "openid profile manage-repos")
OPENID_PROVIDER_URL = os.getenv("OPENID_PROVIDER_URL", "https://huggingface.co")
SPACE_HOST = os.getenv("SPACE_HOST", "localhost:7860")

# Configure CORS - allow all origins in production, specific in dev.
# In Docker Space, requests come from the same domain via the Next.js proxy.
ALLOWED_ORIGINS = os.getenv("ALLOWED_ORIGINS", "*").split(",") if os.getenv("ALLOWED_ORIGINS") else [
    "http://localhost:3000",
    "http://localhost:3001",
    "http://localhost:7860",
    f"https://{SPACE_HOST}" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else "http://localhost:7860"
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS if ALLOWED_ORIGINS != ["*"] else ["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    # Also accept any *.hf.space origin when running on a real Space host.
    allow_origin_regex=r"https://.*\.hf\.space" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else None,
)

# In-memory store for OAuth states (in production, use Redis or similar)
oauth_states = {}

# In-memory store for user sessions, keyed by session token
user_sessions = {}
112
-
113
-
114
# Pydantic models for request/response
class CodeGenerationRequest(BaseModel):
    """Request payload for POST-style code generation."""
    query: str                         # natural-language description of the app to build
    language: str = "html"             # target framework (see LANGUAGE_CHOICES)
    model_id: str = "gemini-3.0-pro"   # one of AVAILABLE_MODELS ids
    provider: str = "auto"
    history: List[List[str]] = []      # prior [user, assistant] turns
    agent_mode: bool = False
122
-
123
-
124
class DeploymentRequest(BaseModel):
    """Request payload for deploying generated code to a HuggingFace Space.

    `description` is new (defaults to None, so existing callers are unaffected):
    the deploy endpoint already forwarded `request.description` when present,
    but the field was never declared, so it could never be set.
    """
    code: str
    space_name: Optional[str] = None
    language: str
    requirements: Optional[str] = None
    existing_repo_id: Optional[str] = None  # For updating existing spaces
    commit_message: Optional[str] = None
    description: Optional[str] = None       # optional Space description forwarded to deployment
131
-
132
-
133
class AuthStatus(BaseModel):
    """Response body for GET /api/auth/status."""
    authenticated: bool
    username: Optional[str] = None  # None when unauthenticated or unknown
    message: str
137
-
138
-
139
class ModelInfo(BaseModel):
    """Public description of one entry from AVAILABLE_MODELS."""
    name: str
    id: str
    description: str
143
-
144
-
145
class CodeGenerationResponse(BaseModel):
    """Non-streaming code-generation result."""
    code: str
    history: List[List[str]]  # updated [user, assistant] turns
    status: str
149
-
150
-
151
class ImportRequest(BaseModel):
    """Request payload for POST /api/import."""
    url: str                   # HF Space/Model or GitHub repository URL
    prefer_local: bool = False # prefer locally-runnable code over inference snippets
154
-
155
-
156
class ImportResponse(BaseModel):
    """Result of a project import; also used for error payloads ("error" status)."""
    status: str
    message: str
    code: str
    language: str
    url: str
    metadata: Dict
163
-
164
-
165
# Mock authentication for development
# In production, integrate with HuggingFace OAuth
class MockAuth:
    """Lightweight auth holder: a token (maybe None) plus an optional username."""

    def __init__(self, token: Optional[str] = None, username: Optional[str] = None):
        self.token = token
        self.username = username

    def is_authenticated(self):
        # Authenticated iff a non-empty token is present.
        return bool(self.token)
174
-
175
-
176
def get_auth_from_header(authorization: Optional[str] = None):
    """Resolve an Authorization header value into a MockAuth.

    Accepts, in order of precedence:
    - a session token previously minted by the OAuth callback (looked up in
      user_sessions),
    - a dev token of the form "dev_token_<username>_<timestamp>",
    - anything else, treated as a raw OAuth access token (username unknown).

    Returns an unauthenticated MockAuth when no header is supplied.
    """
    if not authorization:
        return MockAuth(None, None)

    # Strip a single leading "Bearer " prefix only. The previous
    # str.replace("Bearer ", "") removed EVERY occurrence of the substring,
    # which would corrupt a token that happens to contain it.
    if authorization.startswith("Bearer "):
        token = authorization[len("Bearer "):]
    else:
        token = authorization

    # Heuristic for session tokens (token_urlsafe output: long, often dashed).
    if token and "-" in token and len(token) > 20:
        if token in user_sessions:
            session = user_sessions[token]
            return MockAuth(session["access_token"], session["username"])

    # Dev token format: dev_token_<username>_<timestamp>
    if token and token.startswith("dev_token_"):
        parts = token.split("_")
        username = parts[2] if len(parts) > 2 else "user"
        return MockAuth(token, username)

    # Regular token (OAuth access token passed directly)
    return MockAuth(token, None)
202
-
203
-
204
@app.get("/")
async def root():
    """Health check endpoint for load balancers and smoke tests."""
    return {"status": "ok", "message": "AnyCoder API is running"}
208
-
209
-
210
@app.get("/api/models", response_model=List[ModelInfo])
async def get_models():
    """Get available AI models"""
    models = []
    for entry in AVAILABLE_MODELS:
        models.append(ModelInfo(
            name=entry["name"],
            id=entry["id"],
            description=entry["description"],
        ))
    return models
221
-
222
-
223
@app.get("/api/languages")
async def get_languages():
    """Get available programming languages/frameworks"""
    # Static list; see LANGUAGE_CHOICES at module scope.
    return {"languages": LANGUAGE_CHOICES}
227
-
228
-
229
@app.get("/api/auth/login")
async def oauth_login(request: Request):
    """Initiate the OAuth login flow.

    Returns a JSON body with the provider authorization URL and the CSRF
    `state` the caller must carry through the redirect.
    """
    # Prune stale states (>10 minutes old). Previously states were only
    # removed on a successful callback, so abandoned logins grew the
    # in-memory store without bound.
    now = datetime.now()
    for stale in [s for s, meta in oauth_states.items()
                  if (now - meta["timestamp"]).total_seconds() > 600]:
        oauth_states.pop(stale, None)

    # Generate a random state to prevent CSRF
    state = secrets.token_urlsafe(32)
    oauth_states[state] = {"timestamp": now}

    # Build redirect URI
    protocol = "https" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else "http"
    redirect_uri = f"{protocol}://{SPACE_HOST}/api/auth/callback"

    # Build the authorization URL with urlencode so EVERY parameter is
    # escaped (client_id and state were previously interpolated raw).
    query = urllib.parse.urlencode({
        "client_id": OAUTH_CLIENT_ID,
        "redirect_uri": redirect_uri,
        "scope": OAUTH_SCOPES,
        "state": state,
        "response_type": "code",
    })
    auth_url = f"{OPENID_PROVIDER_URL}/oauth/authorize?{query}"

    return JSONResponse({"login_url": auth_url, "state": state})
251
-
252
-
253
@app.get("/api/auth/callback")
async def oauth_callback(code: str, state: str, request: Request):
    """Handle the OAuth redirect: verify state, exchange the code for tokens,
    fetch the user profile, mint a session, and bounce back to the frontend."""
    # CSRF protection: the state must be one we issued in /api/auth/login.
    if state not in oauth_states:
        raise HTTPException(status_code=400, detail="Invalid state parameter")
    oauth_states.pop(state, None)  # one-time use

    protocol = "https" if SPACE_HOST and not SPACE_HOST.startswith("localhost") else "http"
    redirect_uri = f"{protocol}://{SPACE_HOST}/api/auth/callback"

    # HTTP Basic credentials (client_id:client_secret) for the token endpoint.
    basic_credentials = base64.b64encode(
        f"{OAUTH_CLIENT_ID}:{OAUTH_CLIENT_SECRET}".encode('utf-8')
    ).decode('utf-8')

    async with httpx.AsyncClient() as client:
        try:
            token_response = await client.post(
                f"{OPENID_PROVIDER_URL}/oauth/token",
                data={
                    "client_id": OAUTH_CLIENT_ID,
                    "code": code,
                    "grant_type": "authorization_code",
                    "redirect_uri": redirect_uri,
                },
                headers={
                    "Authorization": f"Basic {basic_credentials}",
                    "Content-Type": "application/x-www-form-urlencoded",
                },
            )
            token_response.raise_for_status()
            token_data = token_response.json()

            # Fetch the user profile with the freshly issued access token.
            access_token = token_data.get("access_token")
            userinfo_response = await client.get(
                f"{OPENID_PROVIDER_URL}/oauth/userinfo",
                headers={"Authorization": f"Bearer {access_token}"},
            )
            userinfo_response.raise_for_status()
            user_info = userinfo_response.json()

            # Mint an opaque session token and stash everything server-side.
            session_token = secrets.token_urlsafe(32)
            user_sessions[session_token] = {
                "access_token": access_token,
                "user_info": user_info,
                "timestamp": datetime.now(),
                "username": user_info.get("name") or user_info.get("preferred_username") or "user",
                "deployed_spaces": []  # Track deployed spaces for follow-up updates
            }

            # Redirect to frontend with session token
            frontend_url = f"{protocol}://{SPACE_HOST}/?session={session_token}"
            return RedirectResponse(url=frontend_url)

        except httpx.HTTPError as e:
            print(f"OAuth error: {e}")
            raise HTTPException(status_code=500, detail=f"OAuth failed: {str(e)}")
316
-
317
-
318
@app.get("/api/auth/session")
async def get_session(session: str):
    """Get user info from a session token minted by the OAuth callback."""
    try:
        session_data = user_sessions[session]
    except KeyError:
        raise HTTPException(status_code=401, detail="Invalid session")
    return {
        "access_token": session_data["access_token"],
        "user_info": session_data["user_info"],
    }
329
-
330
-
331
@app.get("/api/auth/status")
async def auth_status(authorization: Optional[str] = Header(None)):
    """Check authentication status"""
    auth = get_auth_from_header(authorization)
    # Guard clause for the unauthenticated case; success path below.
    if not auth.is_authenticated():
        return AuthStatus(
            authenticated=False,
            username=None,
            message="Not authenticated"
        )
    return AuthStatus(
        authenticated=True,
        username=auth.username,
        message=f"Authenticated as {auth.username}"
    )
346
-
347
-
348
def _extract_chunk_content(chunk, use_mistral: bool):
    """Return the text delta carried by one streaming chunk, or None.

    Mistral chunks nest the payload under chunk.data.choices[0].delta.content;
    OpenAI-compatible chunks use chunk.choices[0].delta.content. The hasattr
    checks are deliberately defensive: streams may include keep-alive or
    metadata chunks with no content.
    """
    if use_mistral:
        if (hasattr(chunk, "data") and chunk.data and
                hasattr(chunk.data, "choices") and chunk.data.choices and
                hasattr(chunk.data.choices[0], "delta") and
                hasattr(chunk.data.choices[0].delta, "content") and
                chunk.data.choices[0].delta.content is not None):
            return chunk.data.choices[0].delta.content
    elif (hasattr(chunk, 'choices') and chunk.choices and
            len(chunk.choices) > 0 and
            hasattr(chunk.choices[0], 'delta') and
            hasattr(chunk.choices[0].delta, 'content') and
            chunk.choices[0].delta.content):
        return chunk.choices[0].delta.content
    return None


@app.get("/api/generate")
async def generate_code(
    query: str,
    language: str = "html",
    model_id: str = "openrouter/sherlock-dash-alpha",
    provider: str = "auto",
    authorization: Optional[str] = Header(None)
):
    """Generate code based on user query - returns a Server-Sent-Events stream.

    Events emitted (JSON in each SSE "data:" line):
      {"type": "chunk", "content": ...}    - one model delta
      {"type": "complete", "code": ...}    - full accumulated code on success
      {"type": "error", "message": ...}    - on any failure

    Dev mode: no authentication required - the server's HF_TOKEN is used.
    In production, real OAuth tokens would be checked here.
    """

    async def event_stream() -> AsyncGenerator[str, None]:
        """Stream generated code chunks as SSE."""
        selected_model_id = model_id

        try:
            # Resolve the requested model; unknown ids fall back to the default.
            selected_model = next(
                (m for m in AVAILABLE_MODELS if m["id"] == selected_model_id), None
            )
            if not selected_model:
                selected_model = AVAILABLE_MODELS[0]
                selected_model_id = selected_model["id"]

            # Track generated code across chunks.
            generated_code = ""

            # Select appropriate system prompt based on language.
            prompt_map = {
                "html": HTML_SYSTEM_PROMPT,
                "gradio": GRADIO_SYSTEM_PROMPT,
                "streamlit": STREAMLIT_SYSTEM_PROMPT,
                "transformers.js": TRANSFORMERS_JS_SYSTEM_PROMPT,
                "react": REACT_SYSTEM_PROMPT,
                "comfyui": JSON_SYSTEM_PROMPT,
            }
            system_prompt = prompt_map.get(language, GENERIC_SYSTEM_PROMPT.format(language=language))

            print(f"[Generate] Using {language} prompt for query: {query[:100]}...")

            # Get the client using backend_models
            print(f"[Generate] Getting client for model: {selected_model_id}")
            client = get_inference_client(selected_model_id, provider)

            # Get the real model ID with provider suffixes
            actual_model_id = get_real_model_id(selected_model_id)
            print(f"[Generate] Using model ID: {actual_model_id}")

            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": f"Generate a {language} application: {query}"}
            ]

            # Hoisted out of the chunk loop: the model family cannot change
            # mid-stream (was previously re-evaluated on every chunk).
            use_mistral = is_mistral_model(selected_model_id)

            try:
                if use_mistral:
                    print("[Generate] Using Mistral SDK")
                    stream = client.chat.stream(
                        model=actual_model_id,
                        messages=messages,
                        max_tokens=10000
                    )
                else:
                    # All other models use the OpenAI-compatible API.
                    stream = client.chat.completions.create(
                        model=actual_model_id,
                        messages=messages,
                        temperature=0.7,
                        max_tokens=10000,
                        stream=True
                    )

                chunk_count = 0
                print(f"[Generate] Starting to stream from {actual_model_id}...")

                for chunk in stream:
                    content = _extract_chunk_content(chunk, use_mistral)
                    if content:
                        generated_code += content
                        chunk_count += 1

                        # Log every 10th chunk to avoid spam
                        if chunk_count % 10 == 0:
                            print(f"[Generate] Streamed {chunk_count} chunks, {len(generated_code)} chars total")

                        # Send chunk as a Server-Sent Event immediately.
                        event_data = json.dumps({
                            "type": "chunk",
                            "content": content,
                            "timestamp": datetime.now().isoformat()
                        })
                        yield f"data: {event_data}\n\n"

                        # Yield control to the event loop without adding latency.
                        await asyncio.sleep(0)

                print(f"[Generate] Completed with {chunk_count} chunks, total length: {len(generated_code)}")

                # Send completion event
                completion_data = json.dumps({
                    "type": "complete",
                    "code": generated_code,
                    "timestamp": datetime.now().isoformat()
                })
                yield f"data: {completion_data}\n\n"

            except Exception as e:
                error_data = json.dumps({
                    "type": "error",
                    "message": str(e),
                    "timestamp": datetime.now().isoformat()
                })
                yield f"data: {error_data}\n\n"

        except Exception as e:
            error_data = json.dumps({
                "type": "error",
                "message": f"Generation error: {str(e)}",
                "timestamp": datetime.now().isoformat()
            })
            yield f"data: {error_data}\n\n"

    return StreamingResponse(
        event_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache, no-transform",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",
            # Removed "Content-Encoding: none" ("none" is not a valid encoding
            # token) and "Transfer-Encoding: chunked" (hop-by-hop; the ASGI
            # server manages response framing). Either could break strict
            # clients or intermediary proxies.
        }
    )
510
-
511
-
512
@app.post("/api/deploy")
async def deploy(
    request: DeploymentRequest,
    authorization: Optional[str] = Header(None)
):
    """Deploy generated code to HuggingFace Spaces.

    Requires authentication. With a dev token (dev_token_*) no deployment
    happens: a prefilled huggingface.co/new-space URL is returned instead.
    Otherwise the code is pushed with the user's OAuth token; when no explicit
    target repo is given, the session's most recent space for the same
    language is reused so follow-up deploys update rather than fork.

    Raises 401/403/500 HTTPExceptions with user-facing messages on failure.
    """
    auth = get_auth_from_header(authorization)

    if not auth.is_authenticated():
        raise HTTPException(status_code=401, detail="Authentication required")

    # Dev mode (no real token): hand back a manual space-creation URL.
    if auth.token and auth.token.startswith("dev_token_"):
        from backend_deploy import detect_sdk_from_code
        base_url = "https://huggingface.co/new-space"

        sdk = detect_sdk_from_code(request.code, request.language)

        params = urllib.parse.urlencode({
            "name": request.space_name or "my-anycoder-app",
            "sdk": sdk
        })

        # Static frontends ship as index.html; everything else as app.py.
        if request.language in ["html", "transformers.js", "comfyui"]:
            file_path = "index.html"
        else:
            file_path = "app.py"

        files_params = urllib.parse.urlencode({
            "files[0][path]": file_path,
            "files[0][content]": request.code
        })

        space_url = f"{base_url}?{params}&{files_params}"

        return {
            "success": True,
            "space_url": space_url,
            "message": "Dev mode: Please create the space manually",
            "dev_mode": True
        }

    # Production mode with real OAuth token
    try:
        from backend_deploy import deploy_to_huggingface_space

        # User token should be the access_token from the OAuth session.
        user_token = auth.token if auth.token else os.getenv("HF_TOKEN")
        if not user_token:
            raise HTTPException(status_code=401, detail="No HuggingFace token available. Please sign in first.")

        print(f"[Deploy] Attempting deployment with token (first 10 chars): {user_token[:10]}...")

        existing_repo_id = request.existing_repo_id

        # Strip only a single leading "Bearer " prefix (str.replace would also
        # corrupt tokens containing the substring elsewhere).
        session_token = None
        if authorization:
            session_token = (authorization[len("Bearer "):]
                             if authorization.startswith("Bearer ") else authorization)

        # If no existing_repo_id provided, check the session for a previously
        # deployed space of the same language (most recent wins).
        if not existing_repo_id and session_token and session_token in user_sessions:
            session = user_sessions[session_token]
            deployed_spaces = session.get("deployed_spaces", [])
            for space in reversed(deployed_spaces):
                if space.get("language") == request.language:
                    existing_repo_id = space.get("repo_id")
                    print(f"[Deploy] Found existing space for {request.language}: {existing_repo_id}")
                    break

        # Use the standalone deployment function
        success, message, space_url = deploy_to_huggingface_space(
            code=request.code,
            language=request.language,
            space_name=request.space_name,
            token=user_token,
            username=auth.username,
            description=getattr(request, 'description', None),
            private=False,
            existing_repo_id=existing_repo_id,
            commit_message=request.commit_message
        )

        if success:
            # Explicit initialization replaces the fragile
            # "repo_id if 'repo_id' in locals() else None" probe, and the
            # repo_id is now reported even when no session is tracked.
            repo_id = space_url.split("/spaces/")[-1] if space_url else None

            # Track deployed space in session for follow-up updates.
            if repo_id and session_token and session_token in user_sessions:
                session = user_sessions[session_token]

                # Replace any old entry for the same repo_id, then append.
                deployed_spaces = [
                    s for s in session.get("deployed_spaces", [])
                    if s.get("repo_id") != repo_id
                ]
                deployed_spaces.append({
                    "repo_id": repo_id,
                    "language": request.language,
                    "timestamp": datetime.now()
                })
                session["deployed_spaces"] = deployed_spaces
                print(f"[Deploy] Tracked space in session: {repo_id}")

            return {
                "success": True,
                "space_url": space_url,
                "message": message,
                "repo_id": repo_id
            }

        # Failure: map the backend message to a user-friendly HTTP error.
        if "401" in message or "Unauthorized" in message:
            raise HTTPException(
                status_code=401,
                detail="Authentication failed. Please sign in again with HuggingFace."
            )
        elif "403" in message or "Forbidden" in message or "Permission" in message:
            raise HTTPException(
                status_code=403,
                detail="Permission denied. Your HuggingFace token may not have the required permissions (manage-repos scope)."
            )
        else:
            raise HTTPException(
                status_code=500,
                detail=message
            )

    except HTTPException:
        # Re-raise HTTP exceptions as-is
        raise
    except Exception as e:
        # Log the full error for debugging
        import traceback
        error_details = traceback.format_exc()
        print(f"[Deploy] Deployment error: {error_details}")

        raise HTTPException(
            status_code=500,
            detail=f"Deployment failed: {str(e)}"
        )
656
-
657
-
658
@app.post("/api/import", response_model=ImportResponse)
async def import_project(request: ImportRequest):
    """
    Import a project from HuggingFace Space, HuggingFace Model, or GitHub repo

    Supports URLs like:
    - https://huggingface.co/spaces/username/space-name
    - https://huggingface.co/username/model-name
    - https://github.com/username/repo-name
    """
    try:
        result = ProjectImporter().import_from_url(request.url)

        # Model imports may offer both inference and local code variants;
        # honor prefer_local when an alternative exists.
        if request.prefer_local and result.get('metadata', {}).get('has_alternatives'):
            local_code = result['metadata'].get('local_code')
            if local_code:
                result['code'] = local_code
                result['metadata']['code_type'] = 'local'
                result['message'] = result['message'].replace('inference', 'local')

        return ImportResponse(**result)

    except Exception as e:
        return ImportResponse(
            status="error",
            message=f"Import failed: {str(e)}",
            code="",
            language="unknown",
            url=request.url,
            metadata={}
        )
692
-
693
-
694
@app.get("/api/import/space/{username}/{space_name}")
async def import_space(username: str, space_name: str):
    """Import a specific HuggingFace Space by username and space name"""
    try:
        return ProjectImporter().import_space(username, space_name)
    except Exception as e:
        # Error payload mirrors the ImportResponse shape.
        return {
            "status": "error",
            "message": f"Failed to import space: {str(e)}",
            "code": "",
            "language": "unknown",
            "url": f"https://huggingface.co/spaces/{username}/{space_name}",
            "metadata": {}
        }
710
-
711
-
712
@app.get("/api/import/model/{path:path}")
async def import_model(path: str, prefer_local: bool = False):
    """
    Import a specific HuggingFace Model by model ID

    Example: /api/import/model/meta-llama/Llama-3.2-1B-Instruct
    """
    try:
        return ProjectImporter().import_model(path, prefer_local=prefer_local)
    except Exception as e:
        # Error payload mirrors the ImportResponse shape.
        return {
            "status": "error",
            "message": f"Failed to import model: {str(e)}",
            "code": "",
            "language": "python",
            "url": f"https://huggingface.co/{path}",
            "metadata": {}
        }
732
-
733
-
734
@app.get("/api/import/github/{owner}/{repo}")
async def import_github(owner: str, repo: str):
    """Import a GitHub repository by owner and repo name"""
    try:
        return ProjectImporter().import_github_repo(owner, repo)
    except Exception as e:
        # Error payload mirrors the ImportResponse shape.
        return {
            "status": "error",
            "message": f"Failed to import repository: {str(e)}",
            "code": "",
            "language": "python",
            "url": f"https://github.com/{owner}/{repo}",
            "metadata": {}
        }
750
-
751
-
752
@app.websocket("/ws/generate")
async def websocket_generate(websocket: WebSocket):
    """WebSocket endpoint for real-time code generation.

    Currently emits a mock character-by-character stream: status, then
    per-character "chunk" events with a progress percentage, then "complete".
    """
    await websocket.accept()

    try:
        while True:
            # One generation request per received JSON message.
            data = await websocket.receive_json()
            query = data.get("query")
            language = data.get("language", "html")
            model_id = data.get("model_id", "openrouter/sherlock-dash-alpha")

            await websocket.send_json({
                "type": "status",
                "message": "Generating code..."
            })

            # Mock code generation for now.
            await asyncio.sleep(0.5)

            sample_code = f"<!-- Generated {language} code -->\n<h1>Hello from AnyCoder!</h1>"
            total = len(sample_code)

            for position, char in enumerate(sample_code, start=1):
                await websocket.send_json({
                    "type": "chunk",
                    "content": char,
                    "progress": position / total * 100
                })
                await asyncio.sleep(0.01)

            await websocket.send_json({
                "type": "complete",
                "code": sample_code
            })

    except WebSocketDisconnect:
        print("Client disconnected")
    except Exception as e:
        await websocket.send_json({
            "type": "error",
            "message": str(e)
        })
        await websocket.close()
800
-
801
-
802
if __name__ == "__main__":
    # Local development entry point; production runs via an external ASGI server.
    import uvicorn
    uvicorn.run("backend_api:app", host="0.0.0.0", port=8000, reload=True)
805
-