"""Main Gradio app for moderation model testing."""
import os
import sys
import gradio as gr
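# Make the Space's local packages (utils, ui) importable when app.py is run as the entrypoint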
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from utils.helpers import get_hf_token
from utils.model_interface import extract_model_id, run_test
from ui.sidebar import build_sidebar
from ui.tab_config import build_config_tab
from ui.tab_policy import build_policy_tab
from ui.tab_testing import (
    build_testing_tab,
    format_model_info,
    format_reasoning_info,
    format_test_result,
)

# ============================================================================
# Handlers
# ============================================================================
def handle_run_test(
    test_input,
    current_policy,
    model_choice,
    reasoning_effort,
    max_tokens,
    temperature,
    top_p,
    system_prompt_val,
    response_format_val,
    oauth_token: gr.OAuthToken | None = None,
):
"""Handle test execution."""
if not test_input or not test_input.strip():
model_info = format_model_info(model_choice, reasoning_effort)
return model_info, "*Please enter test content*", "*No content*", "*No response yet*", gr.update(value="", visible=False), gr.update(value="", visible=False)
if not current_policy or current_policy == "*No policy loaded*":
model_info = format_model_info(model_choice, reasoning_effort)
return model_info, "*Please load a policy first*", "*No policy*", "*No response yet*", gr.update(value="", visible=False), gr.update(value="", visible=False)
# OAuth token is automatically injected by Gradio - we don't pass login_button as input
hf_token, _ = get_hf_token(oauth_token)
if hf_token is None:
model_info = format_model_info(model_choice, reasoning_effort)
return model_info, "*Please log in to use Inference Providers*", "*Authentication required*", "*No response yet*", gr.update(value="", visible=False), gr.update(value="", visible=False)
model_id = extract_model_id(model_choice)
result = run_test(
model_id=model_id,
test_input=test_input,
policy=current_policy,
hf_token=hf_token,
reasoning_effort=reasoning_effort,
max_tokens=int(max_tokens),
temperature=float(temperature),
top_p=float(top_p),
system_prompt=system_prompt_val,
response_format=response_format_val,
)
label_text, parsed, cat_text, reasoning, raw_response = format_test_result(result)
reasoning_visible = bool(reasoning and reasoning.strip())
model_info = format_model_info(model_choice, reasoning_effort)
reasoning_info_text, reasoning_info_visible = format_reasoning_info(model_choice, reasoning)
return (
model_info,
label_text,
cat_text,
raw_response,
gr.update(value=reasoning_info_text, visible=reasoning_info_visible),
gr.update(value=reasoning or "", visible=reasoning_visible),
)
# ============================================================================
# UI Components
# ============================================================================
with gr.Blocks(title="Moderation Model Testing") as demo:
gr.Markdown("# Moderation Model Testing Interface")
gr.Markdown(
"Test moderation models with custom content policies. Define your policy, select a model, "
"and evaluate how different models classify content according to your rules. "
"Supports reasoning models that provide detailed explanations for their decisions."
)
# Sidebar (collapsible)
sidebar_components = build_sidebar()
login_button = sidebar_components["login_button"]
# Main content area with tabs
with gr.Tabs():
# Build tabs
testing_components = build_testing_tab()
test_input = testing_components["test_input"]
run_test_btn = testing_components["run_test_btn"]
model_info_display = testing_components["model_info_display"]
label_display = testing_components["label_display"]
categories_display = testing_components["categories_display"]
model_response_display = testing_components["model_response_display"]
reasoning_info = testing_components["reasoning_info"]
reasoning_display = testing_components["reasoning_display"]
policy_components = build_policy_tab(os.path.dirname(__file__))
current_policy_state = policy_components["current_policy_state"]
config_components = build_config_tab()
model_dropdown = config_components["model_dropdown"]
reasoning_effort = config_components["reasoning_effort"]
max_tokens = config_components["max_tokens"]
temperature = config_components["temperature"]
top_p = config_components["top_p"]
system_prompt_textbox = config_components["system_prompt_textbox"]
response_format_textbox = config_components["response_format_textbox"]
# ============================================================================
# Event Handlers
# ============================================================================
# Cross-tab handler: Run test (needs components from all tabs)
run_test_btn.click(
handle_run_test,
inputs=[
test_input,
current_policy_state,
model_dropdown,
reasoning_effort,
max_tokens,
temperature,
top_p,
system_prompt_textbox,
response_format_textbox,
],
outputs=[
model_info_display,
label_display,
categories_display,
model_response_display,
reasoning_info,
reasoning_display,
],
)
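    # Keep the model info display in sync with the model and reasoning-effort controls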
    model_dropdown.change(
        format_model_info,
        inputs=[model_dropdown, reasoning_effort],
        outputs=model_info_display,
    )
    reasoning_effort.change(
        format_model_info,
        inputs=[model_dropdown, reasoning_effort],
        outputs=model_info_display,
    )

if __name__ == "__main__":
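    # ssr_mode=False turns off Gradio's server-side rendering for the frontend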
    demo.launch(ssr_mode=False)