# NOTE(review): a stray "Spaces: Running" status banner from the hosting UI was
# scraped into this file; converted to a comment so the module parses.
| """Configuration tab UI components.""" | |
| import os | |
| import sys | |
| import gradio as gr | |
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | |
| from utils.constants import MODELS, REASONING_EFFORTS, RESPONSE_FORMAT | |
| from utils.model_interface import extract_model_id, get_default_system_prompt | |
def build_config_tab() -> dict:
    """Build the configuration tab UI.

    Creates the model selector, reasoning-effort dropdown, editable
    system-prompt / response-format textboxes, and generation-parameter
    controls, and wires the change handlers between them.

    Returns:
        dict: Mapping of component names ("model_dropdown",
        "reasoning_effort", "system_prompt_textbox",
        "response_format_textbox", "max_tokens", "temperature", "top_p")
        to the Gradio components, so callers can wire them elsewhere.
    """
    with gr.Tab("⚙️ Model Choice & Configuration"):
        gr.Markdown("### Model Selection")
        model_choices = [f"{m['name']} ({m['id']})" for m in MODELS]
        model_dropdown = gr.Dropdown(label="Model", choices=model_choices, value=model_choices[0])

        # Resolve the initially selected model up front so the reasoning-effort
        # dropdown starts with the correct visibility (previously it was always
        # visible=True, disagreeing with update_reasoning_visibility below).
        initial_model_id = extract_model_id(model_choices[0])
        reasoning_effort = gr.Dropdown(
            label="Reasoning Effort (GPT-OSS only)",
            choices=REASONING_EFFORTS,
            value="Low",
            visible=bool(initial_model_id and initial_model_id.startswith("openai/gpt-oss")),
        )

        def update_reasoning_visibility(choice):
            """Show the reasoning-effort dropdown only for GPT-OSS models."""
            if not choice:
                return gr.update(visible=False)
            model_id = extract_model_id(choice)
            return gr.update(visible=bool(model_id) and model_id.startswith("openai/gpt-oss"))

        def update_system_prompt(model_choice, reasoning_effort_val):
            """Return the default system prompt for the selected model/effort.

            Returns "" when no model is selected, which makes this handler
            safe to wire directly to component change events.
            """
            if not model_choice:
                return ""
            model_id = extract_model_id(model_choice)
            return get_default_system_prompt(model_id, reasoning_effort_val)

        # Seed the system prompt with the default for the initially selected model.
        initial_system_prompt = get_default_system_prompt(initial_model_id, "Low")

        gr.Markdown("---")
        gr.Markdown("### System Prompt & Response Format")
        gr.Markdown("*Edit the prompts below. System prompt varies by model type; response format is used for GPT-OSS developer channel and Qwen.*")
        with gr.Row():
            with gr.Column():
                system_prompt_textbox = gr.Textbox(
                    label="System Prompt",
                    placeholder="System prompt will be auto-generated based on model...",
                    lines=10,
                    value=initial_system_prompt,
                    interactive=True,
                )
            with gr.Column():
                response_format_textbox = gr.Textbox(
                    label="Response Format",
                    placeholder="Response format instructions...",
                    lines=10,
                    value=RESPONSE_FORMAT,
                    interactive=True,
                )
        gr.Markdown("*Edit the prompts above. Values are used directly when running tests.*")

        def update_on_model_change(choice, reasoning_effort_val):
            """Update reasoning visibility and system prompt together on model change."""
            return (
                update_reasoning_visibility(choice),
                update_system_prompt(choice, reasoning_effort_val),
            )

        # Model change drives both the reasoning-effort visibility and the prompt.
        model_dropdown.change(
            update_on_model_change,
            inputs=[model_dropdown, reasoning_effort],
            outputs=[reasoning_effort, system_prompt_textbox],
        )

        # Reasoning-effort change (GPT-OSS) regenerates the system prompt.
        # update_system_prompt already guards the empty-choice case, so the
        # previous duplicate wrapper (update_on_reasoning_change) is removed.
        reasoning_effort.change(
            update_system_prompt,
            inputs=[model_dropdown, reasoning_effort],
            outputs=system_prompt_textbox,
        )

        gr.Markdown("---")
        with gr.Accordion("Generation Parameters", open=False):
            max_tokens = gr.Number(label="Max Tokens", value=9192, precision=0)
            temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.1, step=0.1)
            top_p = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=0.9, step=0.1)

    return {
        "model_dropdown": model_dropdown,
        "reasoning_effort": reasoning_effort,
        "system_prompt_textbox": system_prompt_textbox,
        "response_format_textbox": response_format_textbox,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
    }