"""Content Safety Demo: a Gradio app that flags NSFW images and toxic text."""

import gradio as gr
import timm
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer

# Vision: ViT-based NSFW image classifier, loaded from the Hugging Face Hub via timm
vit_model = timm.create_model("hf_hub:Marqo/nsfw-image-detection-384", pretrained=True)
vit_model = vit_model.eval()

# Rebuild the evaluation-time preprocessing pipeline the model was trained with
data_config = timm.data.resolve_model_data_config(vit_model)
transforms = timm.data.create_transform(**data_config, is_training=False)

# NLP: RoBERTa toxicity classifier
tokenizer = RobertaTokenizer.from_pretrained("s-nlp/roberta_toxicity_classifier")
model = RobertaForSequenceClassification.from_pretrained(
    "s-nlp/roberta_toxicity_classifier"
)
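
# Note: both models run on CPU by default; with a GPU available they could be
# moved with .to("cuda"), as long as inputs are sent to the same device.
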

def moderate_image(img):
    # Preprocess the PIL image, add a batch dimension, and run the classifier
    with torch.no_grad():
        output = vit_model(transforms(img).unsqueeze(0)).softmax(dim=-1).cpu()

    class_names = vit_model.pretrained_cfg["label_names"]
    probabilities = output[0].tolist()
    # Flag the first class once its probability reaches 0.3: a deliberately
    # conservative cutoff rather than a plain argmax
    if probabilities[0] >= 0.3:
        return class_names[0]
    else:
        return class_names[1]


def classify_toxic(text):
    with torch.no_grad():
        batch = tokenizer.encode(text, return_tensors="pt")
        output = model(batch).logits
        probabilities = torch.nn.functional.softmax(output, dim=-1)
        preds = probabilities.tolist()
    # Class 0 is the non-toxic ("neutral") class: report "Toxic" whenever
    # neutral confidence drops to 0.55 or below
    return "Toxic" if preds[0][0] <= 0.55 else "Safe"

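
# Custom CSS: centered layout, card-style sections, flat buttons, and a
# footer pinned to the bottom of the page.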
custom_css = """ |
|
|
/* Center container and control width */ |
|
|
.gradio-container { |
|
|
max-width: 900px !important; |
|
|
margin: 0 auto !important; |
|
|
padding: 20px 10px !important; |
|
|
} |
|
|
|
|
|
/* Header styling */ |
|
|
.clean-title { |
|
|
font-size: 1.9rem; |
|
|
font-weight: 600; |
|
|
text-align: center; |
|
|
margin-bottom: 1.2rem; |
|
|
letter-spacing: -0.4px; |
|
|
} |
|
|
|
|
|
/* Apple-like card sections */ |
|
|
.apple-card { |
|
|
padding: 18px; |
|
|
border-radius: 12px; |
|
|
border: 1px solid rgba(var(--block-border-color-rgb), 0.14); |
|
|
background: var(--block-background-fill); |
|
|
box-shadow: 0 1px 3px rgba(0,0,0,0.04); |
|
|
margin-bottom: 18px; |
|
|
} |
|
|
|
|
|
/* Button styling: clean, flat, subtle */ |
|
|
.gr-button { |
|
|
border-radius: 8px !important; |
|
|
background: var(--button-secondary-background-fill) !important; |
|
|
border: 1px solid rgba(var(--block-border-color-rgb), 0.22) !important; |
|
|
transition: 0.2s ease !important; |
|
|
} |
|
|
|
|
|
.gr-button:hover { |
|
|
background: var(--button-secondary-background-fill-hover) !important; |
|
|
border-color: rgba(var(--block-border-color-rgb), 0.34) !important; |
|
|
} |
|
|
|
|
|
.gr-button:active { |
|
|
background: var(--button-secondary-background-fill-pressed) !important; |
|
|
} |
|
|
|
|
|
/* Reduce blank space between elements */ |
|
|
.gr-block { |
|
|
margin: 6px 0 !important; |
|
|
} |
|
|
|
|
|
/* Label style */ |
|
|
label { |
|
|
font-weight: 500 !important; |
|
|
} |
|
|
|
|
|
/* Make body fill full height so footer can stick */ |
|
|
body, .gradio-container { |
|
|
min-height: 100vh !important; |
|
|
display: flex; |
|
|
flex-direction: column; |
|
|
} |
|
|
|
|
|
/* Main content should expand, footer sits at bottom */ |
|
|
.main-content { |
|
|
flex: 1 0 auto; |
|
|
} |
|
|
|
|
|
.footer-custom { |
|
|
flex-shrink: 0; |
|
|
text-align: center; |
|
|
font-size: 0.80rem; |
|
|
opacity: 0.6; |
|
|
padding: 14px 0; |
|
|
border-top: 1px solid rgba(var(--block-border-color-rgb), 0.12); |
|
|
margin-top: 25px; |
|
|
} |
|
|
|
|
|
footer {display: none !important} |
|
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
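
# UI: two tabs (image moderation and text moderation), each with an input card
# on the left and a prediction card on the right.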
with gr.Blocks(
    theme=gr.themes.Soft(primary_hue="violet", secondary_hue="slate"), css=custom_css
) as demo:
    with gr.Column(elem_classes="main-content"):
        gr.Markdown("<div class='clean-title'>Content Safety Demo</div>")

        with gr.Tabs():
            with gr.Tab("NSFW Image Detection"):
                with gr.Row():
                    with gr.Column(scale=3):
                        with gr.Group(elem_classes="apple-card"):
                            img_in = gr.Image(type="pil", label="Upload Image")
                            classify_img_btn = gr.Button("Classify")
                            img_clear_btn = gr.ClearButton(components=img_in)

                    with gr.Column(scale=2):
                        with gr.Group(elem_classes="apple-card"):
                            img_out = gr.Label(label="Prediction")

                classify_img_btn.click(
                    fn=moderate_image, inputs=img_in, outputs=img_out
                )

            with gr.Tab("Toxic Text Detection"):
                with gr.Row():
                    with gr.Column(scale=3):
                        with gr.Group(elem_classes="apple-card"):
                            txt_in = gr.Textbox(lines=4, label="Enter Text")
                            classify_txt_btn = gr.Button("Analyze")
                            text_clear_btn = gr.ClearButton(components=txt_in)

                    with gr.Column(scale=2):
                        with gr.Group(elem_classes="apple-card"):
                            txt_out = gr.Label(label="Prediction")

                classify_txt_btn.click(classify_toxic, inputs=txt_in, outputs=txt_out)

    # Placed outside .main-content so the flex CSS can pin it to the bottom
    gr.Markdown(
        "<div class='footer-custom'>Demo by 7th • Powered by Transformers</div>"
    )

if __name__ == "__main__":
    demo.launch()
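
# Run locally with `python app.py` (assuming this file is saved as app.py);
# Gradio prints a local URL, http://127.0.0.1:7860 by default.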