Commit: "adding hate" — adds a hate-speech classification model and endpoint.
File changed: app.py
|
@@ -10,6 +10,7 @@ SPAM_MODEL = "valurank/distilroberta-spam-comments-detection"
|
|
| 10 |
TOXIC_MODEL = "s-nlp/roberta_toxicity_classifier"
|
| 11 |
SENTIMENT_MODEL = "nlptown/bert-base-multilingual-uncased-sentiment"
|
| 12 |
NSFW_MODEL = "michellejieli/NSFW_text_classifier"
|
|
|
|
| 13 |
|
| 14 |
spam = pipeline("text-classification", model=SPAM_MODEL)
|
| 15 |
|
|
@@ -19,6 +20,8 @@ sentiment = pipeline("text-classification", model = SENTIMENT_MODEL)
|
|
| 19 |
|
| 20 |
nsfw = pipeline("text-classification", model = NSFW_MODEL)
|
| 21 |
|
|
|
|
|
|
|
| 22 |
|
| 23 |
app = FastAPI()
|
| 24 |
|
|
@@ -49,6 +52,12 @@ def predict_nsfw(query: Query):
|
|
| 49 |
result = nsfw(query.text)[0]
|
| 50 |
return {"label": result["label"], "score": result["score"]}
|
| 51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
@app.get("/health")
|
| 53 |
def health_check():
|
| 54 |
|
|
|
|
|
| 14 |
|
| 15 |
spam = pipeline("text-classification", model=SPAM_MODEL)
|
| 16 |
|
|
|
|
| 20 |
|
| 21 |
nsfw = pipeline("text-classification", model = NSFW_MODEL)
|
| 22 |
|
| 23 |
+
hate = pipeline("text-classification", model = HATE_MODEL)
|
| 24 |
+
|
| 25 |
|
| 26 |
app = FastAPI()
|
| 27 |
|
|
|
|
| 52 |
result = nsfw(query.text)[0]
|
| 53 |
return {"label": result["label"], "score": result["score"]}
|
| 54 |
|
| 55 |
+
@app.post("/hate")
|
| 56 |
+
def predict_hate(query: Query):
|
| 57 |
+
result = hate(query.text)[0]
|
| 58 |
+
return {"label": result["label"], "score": result["score"]}
|
| 59 |
+
|
| 60 |
+
|
| 61 |
@app.get("/health")
|
| 62 |
def health_check():
|
| 63 |
|