import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load model and tokenizer
model_name = "usef310/flan-t5-small-sentiment"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def predict_sentiment(text):
    """Predict sentiment of input text."""
    if not text.strip():
        return "Please enter some text!"

    # Prepare input
    inputs = tokenizer(
        "sentiment: " + text,
        return_tensors="pt",
        max_length=256,
        truncation=True,
    )

    # Generate prediction
    outputs = model.generate(**inputs, max_length=8)
    prediction = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Format output
    sentiment = prediction.upper()
    emoji = "😊" if "positive" in prediction.lower() else "😞"
    return f"{emoji} {sentiment}"


# Create Gradio interface
demo = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(
        lines=5,
        placeholder="Enter a movie review or any text...",
        label="Text Input",
    ),
    outputs=gr.Textbox(label="Sentiment Prediction"),
    title="🎬 FLAN-T5 Sentiment Analysis",
    description="Fine-tuned FLAN-T5-Small for sentiment classification. Enter any text to get a positive/negative prediction!",
    examples=[
        ["This movie was absolutely fantastic! I loved every minute of it."],
        ["Terrible film. Complete waste of time and money."],
        ["The acting was superb and the plot kept me engaged throughout."],
        ["I didn't enjoy this movie at all. Very disappointing."],
        ["An incredible masterpiece that everyone should watch!"],
    ],
    theme="soft",
)

if __name__ == "__main__":
    demo.launch()
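
# Usage note (a minimal sketch, assuming this script is saved as app.py with
# gradio, transformers, and torch installed):
#
#   pip install gradio transformers torch
#   python app.py
#
# demo.launch() serves the interface locally, by default at
# http://127.0.0.1:7860; on a Hugging Face Space with the Gradio SDK, an
# app.py entry point like this is launched automatically.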