fw7th committed
Commit 9589a47
1 Parent(s): 32c4fe4

Testing new feature

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -35,8 +35,10 @@ def classify_toxic(text):
     batch = tokenizer.encode(text, return_tensors="pt")
     output = model(batch).logits
     probabilities = torch.nn.functional.softmax(output, dim=-1)
-    probabilities = probabilities.tolist()
-    return "Toxic" if probabilities[0] <= 0.55 else "Safe"
+    preds = probabilities.tolist()
+    print(f"Preds: {preds}")
+    return "Safe"
+    # return "Toxic" if preds[0] <= 0.55 else "Safe"
 
 
 # -----------------------
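
For context, a minimal sketch of what the full classify_toxic function could look like once the threshold check is reinstated. The tokenizer/model loading and the hub id are assumptions (the commit does not show them), and note that probabilities.tolist() on a (1, num_labels) softmax output yields a nested list, so the commented-out comparison would need a second index such as preds[0][0].

# Minimal sketch; everything not shown in the diff above is an assumption.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "your-org/your-toxicity-model"  # hypothetical placeholder, not shown in this commit

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
model.eval()

def classify_toxic(text):
    batch = tokenizer.encode(text, return_tensors="pt")
    with torch.no_grad():  # inference only, no gradients needed
        output = model(batch).logits
    probabilities = torch.nn.functional.softmax(output, dim=-1)
    preds = probabilities.tolist()  # nested list, e.g. [[p_class0, p_class1]]
    print(f"Preds: {preds}")
    # preds[0] is the per-class list for the single input, so the threshold
    # needs a second index; treating index 0 as the non-toxic class is an assumption.
    return "Toxic" if preds[0][0] <= 0.55 else "Safe"

The 0.55 cutoff mirrors the commented-out line in the diff; the choice to label the input "Toxic" when the class-0 probability falls at or below that cutoff is kept as-is from the original code.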