import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the tokenizer and the SST-2 fine-tuned BERT classifier once at startup
model_name = "textattack/bert-base-uncased-SST-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def analyze_sentiment(text):
    # Tokenize the input and run a single forward pass without tracking gradients
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # Softmax over the two SST-2 classes: index 0 = negative, index 1 = positive
    probs = torch.softmax(outputs.logits, dim=1)
    return {
        "POSITIVE": float(probs[0][1]),
        "NEGATIVE": float(probs[0][0])
    }

# Create the Gradio interface: free-text input, label output showing class probabilities
demo = gr.Interface(
    fn=analyze_sentiment,
    inputs=gr.Textbox(label="Input text", placeholder="Enter text here..."),
    outputs=gr.Label(label="Sentiment Probabilities"),
    examples=[
        ["I love this product!"],
        ["This was a terrible experience"],
        ["It was okay, nothing special"]
    ],
    title="BERT Sentiment Analysis",
    description="Predicts sentiment using a BERT model fine-tuned on the SST-2 dataset"
)

demo.launch()
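
# --- Usage notes: a minimal sketch, assuming a local run with Gradio defaults
# --- and that the script is saved as app.py (filename is an assumption).
# Install dependencies first, e.g.:  pip install gradio transformers torch
# Run with `python app.py`; Gradio serves the app on http://127.0.0.1:7860 by default.
# Passing share=True to demo.launch() creates a temporary public URL.
# The running app can also be queried programmatically via the gradio_client package.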