| """Gradio app that showcases Danish offensive text models.""" | |
| import warnings | |
| from numba.core.errors import NumbaDeprecationWarning | |
| warnings.filterwarnings("ignore", category=NumbaDeprecationWarning) | |
| import gradio as gr | |
| from transformers import pipeline | |
| from typing import Tuple, Dict, List | |


def main():
    pipe = pipeline(
        task="text-classification",
        model="alexandrainst/da-offensive-detection-small",
    )
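
    # The model card for "alexandrainst/da-offensive-detection-small" is on the
    # Hugging Face Hub; it is a small Danish text classifier that predicts
    # whether a text is offensive and reports a confidence score.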

    examples = [
        "Din store idiot.",
        "Jeg er glad for at være her.",
        "Hvem tror du, du er?",
        "Har du hæklefejl i kysen?",
        "Hej med dig, jeg hedder Peter.",
        "Fuck hvor er det dejligt, det her :)",
        "🍆",
        "😊",
    ]

    def classification(text: str) -> Dict[str, float]:
        # The pipeline returns one prediction dict per input text.
        output: dict = pipe(text)[0]
        print(text, output)
        # gr.Label expects a {label: confidence} mapping.
        return {output["label"]: output["score"]}
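
    # For example, classification("Din store idiot.") might return something like
    # {"Offensive": 0.98} (label name and score here are illustrative); gr.Label
    # renders such a mapping as the predicted label with a confidence bar.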

    demo = gr.Interface(
        fn=classification,
        inputs=gr.Textbox(placeholder="Enter sentence here...", value=examples[0]),
        outputs=gr.Label(),
        examples=examples,
        title="Danish Offensive Text Detection",
        description="""
Detect offensive text in Danish. Write any text in the box below, and the model will predict whether it is offensive or not.

_Also, be patient, as this demo is running on a CPU!_""",
    )
    demo.launch()


if __name__ == "__main__":
    main()