"""Configuration tab UI components."""

import os
import sys

import gradio as gr

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from utils.constants import MODELS, REASONING_EFFORTS, RESPONSE_FORMAT
from utils.model_interface import extract_model_id, get_default_system_prompt


def build_config_tab() -> dict:
    """Build the configuration tab UI."""
    with gr.Tab("⚙️ Model Choice & Configuration"):
        gr.Markdown("### Model Selection")

        model_choices = [f"{m['name']} ({m['id']})" for m in MODELS]
        model_dropdown = gr.Dropdown(label="Model", choices=model_choices, value=model_choices[0])
        reasoning_effort = gr.Dropdown(
            label="Reasoning Effort (GPT-OSS only)",
            choices=REASONING_EFFORTS,
            value="Low",
            visible=True,  # visibility is kept in sync with the selected model by the change handler below
        )

        def update_reasoning_visibility(choice):
            """Update reasoning effort visibility based on selected model."""
            if not choice:
                return gr.update(visible=False)
            model_id = extract_model_id(choice)
            return gr.update(visible=model_id.startswith("openai/gpt-oss") if model_id else False)

        def update_system_prompt(model_choice, reasoning_effort_val):
            """Update system prompt when model or reasoning effort changes."""
            if not model_choice:
                return ""
            model_id = extract_model_id(model_choice)
            return get_default_system_prompt(model_id, reasoning_effort_val)

        # Initialize system prompt with default for first model
        initial_model_id = extract_model_id(model_choices[0])
        initial_system_prompt = get_default_system_prompt(initial_model_id, "Low")

        gr.Markdown("---")
        gr.Markdown("### System Prompt & Response Format")
        gr.Markdown("*Edit the prompts below. System prompt varies by model type; response format is used for GPT-OSS developer channel and Qwen.*")

        with gr.Row():
            with gr.Column():
                system_prompt_textbox = gr.Textbox(
                    label="System Prompt",
                    placeholder="System prompt will be auto-generated based on model...",
                    lines=10,
                    value=initial_system_prompt,
                    interactive=True,
                )
            
            with gr.Column():
                response_format_textbox = gr.Textbox(
                    label="Response Format",
                    placeholder="Response format instructions...",
                    lines=10,
                    value=RESPONSE_FORMAT,
                    interactive=True,
                )
        
        gr.Markdown("*Edit the prompts above. Values are used directly when running tests.*")

        def update_on_model_change(choice, reasoning_effort_val):
            """Update both reasoning visibility and system prompt when model changes."""
            visibility_update = update_reasoning_visibility(choice)
            system_prompt_update = update_system_prompt(choice, reasoning_effort_val)
            return visibility_update, system_prompt_update

        # Update reasoning visibility and system prompt when model changes
        model_dropdown.change(
            update_on_model_change,
            inputs=[model_dropdown, reasoning_effort],
            outputs=[reasoning_effort, system_prompt_textbox],
        )

        # Update system prompt when reasoning effort changes (for GPT-OSS)
        def update_on_reasoning_change(choice, effort):
            """Update system prompt when reasoning effort changes."""
            if not choice:
                return ""
            return update_system_prompt(choice, effort)
        
        reasoning_effort.change(
            update_on_reasoning_change,
            inputs=[model_dropdown, reasoning_effort],
            outputs=system_prompt_textbox,
        )

        gr.Markdown("---")
        with gr.Accordion("Generation Parameters", open=False):
            max_tokens = gr.Number(label="Max Tokens", value=9192, precision=0)
            temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.1, step=0.1)
            top_p = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=0.9, step=0.1)

    return {
        "model_dropdown": model_dropdown,
        "reasoning_effort": reasoning_effort,
        "system_prompt_textbox": system_prompt_textbox,
        "response_format_textbox": response_format_textbox,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
    }
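

# --- Usage sketch (not part of the original module) --------------------------
# A minimal, illustrative way to mount this tab on its own for a quick manual
# check. It assumes the surrounding app normally wraps build_config_tab() in a
# gr.Blocks context together with the other tabs; the launch call below is a
# placeholder for that app's own entry point.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        config_components = build_config_tab()
    demo.launch()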