import os

# Set the Hugging Face cache directory (HF_HOME replaces the deprecated
# TRANSFORMERS_CACHE). This must be set before transformers is imported,
# otherwise the default cache location has already been resolved.
os.environ["HF_HOME"] = "/app/cache"

from fastapi import FastAPI
from langdetect import DetectorFactory, LangDetectException, detect
from pydantic import BaseModel
from transformers import pipeline

# Ensure consistent language detection results across runs
DetectorFactory.seed = 0

app = FastAPI()

# Load the sentiment analysis models: a multilingual model for non-English text
# and an English-only model for English text
multilingual_model = pipeline("sentiment-analysis", model="tabularisai/multilingual-sentiment-analysis")
english_model = pipeline("sentiment-analysis", model="siebert/sentiment-roberta-large-english")

class SentimentRequest(BaseModel):
    text: str


class SentimentResponse(BaseModel):
    original_text: str
    language_detected: str
    sentiment: str
    confidence_score: float


def detect_language(text: str) -> str:
    """Detect the language of the input text, falling back to 'unknown'."""
    try:
        return detect(text)
    except LangDetectException:
        return "unknown"

@app.get("/")
def home():
    return {"message": "Sentiment Analysis API is running!"}


# Route path "/analyze" is assumed; the original snippet does not show the decorator.
@app.post("/analyze", response_model=SentimentResponse)
def analyze_sentiment(request: SentimentRequest):
    text = request.text
    language = detect_language(text)
    # Choose the appropriate model based on the detected language
    if language == "en":
        result = english_model(text)
    else:
        result = multilingual_model(text)
    return SentimentResponse(
        original_text=text,
        language_detected=language,
        sentiment=result[0]["label"].lower(),
        confidence_score=result[0]["score"],
    )
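
For a quick check, the endpoint can be exercised in-process with FastAPI's TestClient. This is a minimal sketch, assuming the code above is saved as app.py and the POST route is mounted at /analyze (an assumed path, as noted in the decorator comment); it is not part of the original app.

# Minimal usage sketch (assumptions: module saved as app.py, POST route at /analyze).
from fastapi.testclient import TestClient

from app import app

client = TestClient(app)

response = client.post("/analyze", json={"text": "Das Produkt ist wirklich großartig!"})
print(response.json())
# Example response shape (label and score depend on the model):
# {'original_text': '...', 'language_detected': 'de', 'sentiment': '...', 'confidence_score': 0.97}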