Spaces:
Sleeping
Sleeping
File size: 4,164 Bytes
d9a20ba 5c96465 d682d25 11d5260 381c74b c719bf8 00f5510 5c96465 3c24632 381c74b 5c96465 9150c13 5c96465 d9a20ba 99c65c5 d9a20ba 42c8b96 d9a20ba 99c65c5 cfec215 d9a20ba 2029f21 d9a20ba 4939335 d9a20ba 15f1176 d9a20ba |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 |
# --- Legacy demo (disabled) -------------------------------------------------
# The triple-quoted string below is an earlier token-classification demo
# (fine-tuned NBME DeBERTa / Bio_ClinicalBERT with a JSON-output Gradio
# Interface), kept for reference. It is never executed — Python evaluates it
# as an unused string literal.
'''
import gradio as gr
from transformers import AutoTokenizer, AutoModelForTokenClassification
import torch
#Load model and tokenizer
#model_name = "Shenkezs/nbme-deberta-v3-finetuned"
#tokenizer_name = "microsoft/deberta-v3-base"
model_name = "smeoni/nbme-Bio_ClinicalBERT"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForTokenClassification.from_pretrained(model_name)
#tokenizer = AutoTokenizer.from_pretrained(model_name)
#model = AutoModelForTokenClassification.from_pretrained(model_name)
# Define prediction function
def predict(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
outputs = model(**inputs).logits
predictions = torch.argmax(outputs, dim=-1)
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
labels = predictions[0].tolist()
return {t: l for t, l in zip(tokens, labels)}
# Create Gradio Interface
demo = gr.Interface(
fn=predict,
inputs=gr.Textbox(lines=3, placeholder="Enter patient note or text...."),
outputs="json",
title="NBME DeBERTa Fine-tuned Model",
description="Predicts token-level classifications from the fine-tuned NBME model."
)
if __name__ == "__main__":
demo.launch()
'''
# Third-party dependencies: Gradio for the UI, Transformers for inference.
import gradio as gr
from transformers import pipeline
# Load model pipeline
# NOTE(review): this is a 7B-parameter instruct model loaded eagerly at import
# time — assumes the Space has GPU / sufficient RAM; confirm the hardware tier.
pipe = pipeline("text-generation", model="Orion-zhen/Qwen2.5-7B-Instruct-Uncensored")
# Chat function
def chat_fn(message, history):
    """Generate a reply to ``message`` using the text-generation pipeline.

    Parameters
    ----------
    message : str
        The user's latest chat message, passed verbatim as the prompt.
    history : list
        Prior turns supplied by ``gr.ChatInterface``. Currently unused, so
        the model sees each message without conversational context.

    Returns
    -------
    str
        The model's continuation, stripped of surrounding whitespace.
    """
    # Bug fix: "text-generation" pipelines include the prompt in
    # "generated_text" by default, so the original code echoed the user's
    # message back at the start of every reply. return_full_text=False makes
    # the pipeline return only the newly generated continuation.
    outputs = pipe(
        message,
        max_new_tokens=250,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,
    )
    return outputs[0]["generated_text"].strip()
# Custom theme and layout: a two-column page — info/login sidebar on the left,
# chat interface on the right. (Indentation reconstructed; nesting of the
# sidebar widgets inside gr.Group vs. gr.Column is layout-only.)
with gr.Blocks(
    title="NBME DeBERTa Finetuned Assistant",
    theme=gr.themes.Soft(primary_hue="blue"),
    css="""
    .gradio-container {
        font-family: 'Inter', sans-serif;
        background-color: white;
        color: #1a1a1a;
    }
    .gr-button {
        border-radius: 8px !important;
    }
    #chatbot {
        background-color: white;
        border-radius: 12px;
        box-shadow: 0 2px 8px rgba(0,0,0,0.05);
    }
    .svelte-drum-input textarea, .svelte-drum-input input {
        background-color: white !important;
        color: black !important;
    }
    .svelte-drum-examples button {
        background-color: white !important;
        color: black !important;
        border: 1px solid #ccc !important;
    }
    .svelte-drum-examples button:hover {
        background-color: #f5f5f5 !important;
    }
    footer {visibility: hidden;}
    """,
) as demo:
    with gr.Row():
        # Left sidebar: branding, Hugging Face login, and model information.
        with gr.Column(scale=1, min_width=250):
            with gr.Group():
                gr.Markdown("""
                ### 🧠 NBME DeBERTa Finetuned Chatbot
                A research-focused conversational assistant powered by a fine-tuned transformer model.
                Use this app to explore clinical, academic, or conceptual discussions.
                """)
            gr.Markdown("**Sign in** to use this API with your Hugging Face account.")
            login_button = gr.LoginButton("🔐 Sign in with Hugging Face")
            gr.Markdown("---")
            gr.Markdown("""
            #### About
            This chatbot is based on a fine-tuned DeBERTa model trained on the NBME dataset.
            It supports text-based reasoning, knowledge queries, and conceptual explanations.
            """)
            gr.Markdown("[📄 View Model Card](https://huggingface.co/Shenkezs/nbme-deberta-v3-finetuned)")
        # Right panel: the chat UI wired to chat_fn.
        with gr.Column(scale=3):
            gr.ChatInterface(
                fn=chat_fn,
                title="Chat Interface",
                description="Engage in research-level discussions or ask general knowledge questions.",
                chatbot=gr.Chatbot(label="Academic Research Assistant"),
                examples=[
                    ["Explain gravity to a 5-year-old."],
                    ["What were the main causes of World War I?"],
                    ["Tell me a joke about calculus."],
                    ["What is the capital of Pakistan?"],
                ],
            )

# Fixes: the original launch line ended with a stray "|" (copy/paste artifact,
# a syntax error), and launch is now under a main guard, matching the
# convention used by the legacy demo earlier in this file.
if __name__ == "__main__":
    demo.launch(share=True)