Fixed NLP issue
app.py CHANGED
@@ -35,8 +35,8 @@ def classify_toxic(text):
     batch = tokenizer.encode(text, return_tensors="pt")
     output = model(batch).logits
     probabilities = torch.nn.functional.softmax(output, dim=-1)
-
-    return
+    probabilities = probabilities.tolist()
+    return "Toxic" if probabilities[0][0] <= 0.55 else "Safe"
 
 
 # -----------------------
@@ -169,4 +169,4 @@ with gr.Blocks(
 )
 
 if __name__ == "__main__":
-    demo.launch(
+    demo.launch()
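For reference, below is a minimal runnable sketch of the fixed pieces, not the Space's exact code: the checkpoint name, the Blocks layout, and the component names are assumptions, since the diff only shows `classify_toxic` and the launch block. One detail worth noting: `softmax(output, dim=-1).tolist()` on a `[1, 2]` logits tensor produces a nested list such as `[[0.9, 0.1]]`, so the class-0 probability is `probabilities[0][0]`; comparing `probabilities[0]` (itself a list) against `0.55` would raise a TypeError.

```python
# Minimal sketch under assumptions: the checkpoint and the Blocks layout
# below are placeholders, not the Space's actual model or UI.
import torch
import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_NAME = "s-nlp/roberta_toxicity_classifier"  # assumed two-class model (0 = neutral, 1 = toxic)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

def classify_toxic(text):
    batch = tokenizer.encode(text, return_tensors="pt")
    output = model(batch).logits
    probabilities = torch.nn.functional.softmax(output, dim=-1)
    # tolist() on a [1, 2] tensor yields a nested list, e.g. [[0.9, 0.1]];
    # [0][0] is the probability of class 0 (non-toxic).
    probabilities = probabilities.tolist()
    return "Toxic" if probabilities[0][0] <= 0.55 else "Safe"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Text to check")
    out = gr.Label()
    inp.submit(classify_toxic, inp, out)

if __name__ == "__main__":
    demo.launch()
```

The `<= 0.55` threshold is kept from the diff as-is: the text is labeled "Toxic" whenever the model assigns at most 55% probability to the non-toxic class.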