fw7th committed
Commit 32c4fe4 · 1 Parent(s): 9292dcb

Fixed NLP issue

Files changed (1)
1. app.py  +3 -3
app.py CHANGED
@@ -35,8 +35,8 @@ def classify_toxic(text):
     batch = tokenizer.encode(text, return_tensors="pt")
     output = model(batch).logits
     probabilities = torch.nn.functional.softmax(output, dim=-1)
-    print(f"Output: {probabilities}")
-    return output
+    probabilities = probabilities.tolist()[0]
+    return "Toxic" if probabilities[0] <= 0.55 else "Safe"
 
 
 # -----------------------
@@ -169,4 +169,4 @@ with gr.Blocks(
     )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch()
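For context, a minimal sketch of classify_toxic as it reads after this commit, assuming a two-class sequence-classification checkpoint whose index 0 is the non-toxic class; the checkpoint name below is a placeholder, since the model and tokenizer that app.py actually loads are outside this diff:

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Placeholder checkpoint; the real setup in app.py is not shown in this diff.
tokenizer = AutoTokenizer.from_pretrained("s-nlp/roberta_toxicity_classifier")
model = AutoModelForSequenceClassification.from_pretrained("s-nlp/roberta_toxicity_classifier")

def classify_toxic(text):
    # Encode the input as a single-item batch of token ids.
    batch = tokenizer.encode(text, return_tensors="pt")
    # Raw class scores, shape (1, num_labels).
    output = model(batch).logits
    # Normalize the scores into probabilities over the labels.
    probabilities = torch.nn.functional.softmax(output, dim=-1)
    # tolist() returns a nested list for the batch; keep the first (only) row.
    probabilities = probabilities.tolist()[0]
    # Assumption: index 0 holds the non-toxic probability, per the commit's threshold logic.
    return "Toxic" if probabilities[0] <= 0.55 else "Safe"

Dropping share=True from demo.launch() fits an app hosted on a Hugging Face Space: the app is already served publicly there, so a Gradio share link is unnecessary (and not supported in that environment).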