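# ---------------------------------------------------------------------------
# Previous version of this Space (a token-classification demo) is kept below,
# disabled inside a module-level string, for reference only.
# ---------------------------------------------------------------------------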
'''
import gradio as gr
from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
#Load model and tokenizer
#model_name = "Shenkezs/nbme-deberta-v3-finetuned"
#tokenizer_name = "microsoft/deberta-v3-base"
model_name = "smeoni/nbme-Bio_ClinicalBERT"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)
# Define prediction function
def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model(**inputs).logits
    predictions = torch.argmax(outputs, dim=-1)
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    labels = predictions[0].tolist()
    return {t: l for t, l in zip(tokens, labels)}
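
# Hedged sketch (not part of the original app): the dict above maps sub-word tokens to
# raw label ids. To display readable label names instead, the model config's id2label
# mapping could be applied; predict_label_names below is a hypothetical helper.
def predict_label_names(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    ids = torch.argmax(logits, dim=-1)[0].tolist()
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    return {t: model.config.id2label[i] for t, i in zip(tokens, ids)}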
# Create Gradio Interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=3, placeholder="Enter patient note or text..."),
    outputs="json",
    title="NBME DeBERTa Fine-tuned Model",
    description="Predicts token-level classifications from the fine-tuned NBME model."
)

if __name__ == "__main__":
    demo.launch()
'''
import gradio as gr
from transformers import pipeline
# Load model pipeline
pipe = pipeline("text-generation", model="Orion-zhen/Qwen2.5-7B-Instruct-Uncensored")
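# Note: this is a ~7B-parameter causal LM; loading it like this typically needs a GPU
# with ample VRAM (or a smaller/quantized variant) to stay responsive, and the weights
# are downloaded on first startup.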
# Chat function
def chat_fn(message, history):
    # `history` is unused here; return_full_text=False keeps the prompt from being echoed back.
    response = pipe(message, max_new_tokens=250, do_sample=True, temperature=0.7,
                    return_full_text=False)[0]['generated_text']
    return response.strip()
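
# Hedged sketch (not wired into the UI): a variant that feeds Gradio's tuple-style
# history back to the model as chat messages, assuming a transformers version whose
# text-generation pipeline accepts {"role": ..., "content": ...} dicts and a model
# that ships a chat template. chat_fn_with_history is a hypothetical helper name.
def chat_fn_with_history(message, history):
    messages = []
    for user_msg, bot_msg in history:  # history as [user, assistant] pairs
        messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    out = pipe(messages, max_new_tokens=250, do_sample=True, temperature=0.7)
    # With chat input the pipeline returns the whole conversation; the last
    # entry is the newly generated assistant turn.
    return out[0]["generated_text"][-1]["content"].strip()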
# Custom theme and layout
with gr.Blocks(
title="NBME DeBERTa Finetuned Assistant",
theme=gr.themes.Soft(primary_hue="blue"),
css="""
.gradio-container {
font-family: 'Inter', sans-serif;
background-color: white;
color: #1a1a1a;
}
.gr-button {
border-radius: 8px !important;
}
#chatbot {
background-color: white;
border-radius: 12px;
box-shadow: 0 2px 8px rgba(0,0,0,0.05);
}
.svelte-drum-input textarea, .svelte-drum-input input {
background-color: white !important;
color: black !important;
}
.svelte-drum-examples button {
background-color: white !important;
color: black !important;
border: 1px solid #ccc !important;
}
.svelte-drum-examples button:hover {
background-color: #f5f5f5 !important;
}
footer {visibility: hidden;}
"""
) as demo:
    with gr.Row():
        with gr.Column(scale=1, min_width=250):
            with gr.Group():
                gr.Markdown("""
                ### 🧠 NBME DeBERTa Finetuned Chatbot
                A research-focused conversational assistant powered by a fine-tuned transformer model.
                Use this app to explore clinical, academic, or conceptual discussions.
                """)
            gr.Markdown("**Sign in** to use this API with your Hugging Face account.")
            login_button = gr.LoginButton("🔐 Sign in with Hugging Face")
            gr.Markdown("---")
            gr.Markdown("""
            #### About
            This chatbot is based on a fine-tuned DeBERTa model trained on the NBME dataset.
            It supports text-based reasoning, knowledge queries, and conceptual explanations.
            """)
            gr.Markdown("[📄 View Model Card](https://huggingface.co/Shenkezs/nbme-deberta-v3-finetuned)")
        with gr.Column(scale=3):
            gr.ChatInterface(
                fn=chat_fn,
                title="Chat Interface",
                description="Engage in research-level discussions or ask general knowledge questions.",
                # elem_id="chatbot" lets the custom #chatbot CSS rule above apply to this component.
                chatbot=gr.Chatbot(label="Academic Research Assistant", elem_id="chatbot"),
                examples=[
                    ["Explain gravity to a 5-year-old."],
                    ["What were the main causes of World War I?"],
                    ["Tell me a joke about calculus."],
                    ["What is the capital of Pakistan?"]
                ]
            )
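
# share=True mainly matters for local runs (it creates a public gradio.live link);
# on Hugging Face Spaces the app is already served publicly.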
demo.launch(share=True)