Yacine Jernite committed on
Commit
c90d7c6
·
1 Parent(s): 463d65e

native error handling

Browse files
Files changed (5) hide show
  1. app.py +38 -25
  2. ui/tab_dataset.py +9 -4
  3. ui/tab_policy.py +56 -11
  4. utils/helpers.py +11 -4
  5. utils/model_interface.py +25 -10
app.py CHANGED
@@ -69,34 +69,37 @@ def handle_run_test(test_input, current_policy, model_choice, reasoning_effort,
69
  """Handle test execution."""
70
 
71
  if not test_input or not test_input.strip():
72
- model_info = format_model_info(model_choice, reasoning_effort)
73
- return model_info, "*Please enter test content*", "*No content*", "*No response yet*", gr.update(value="", visible=False), gr.update(value="", visible=False)
74
 
75
  if not current_policy or current_policy == "*No policy loaded*":
76
- model_info = format_model_info(model_choice, reasoning_effort)
77
- return model_info, "*Please load a policy first*", "*No policy*", "*No response yet*", gr.update(value="", visible=False), gr.update(value="", visible=False)
78
 
79
  # OAuth token is automatically injected by Gradio - we don't pass login_button as input
80
  # Use inference token (org preferred, falls back to personal)
81
  hf_token, _ = get_inference_token(oauth_token)
82
  if hf_token is None:
83
- model_info = format_model_info(model_choice, reasoning_effort)
84
- return model_info, "*Please log in or set tokens to use Inference Providers*", "*Authentication required*", "*No response yet*", gr.update(value="", visible=False), gr.update(value="", visible=False)
85
 
86
  model_id = extract_model_id(model_choice)
87
 
88
- result = run_test(
89
- model_id=model_id,
90
- test_input=test_input,
91
- policy=current_policy,
92
- hf_token=hf_token,
93
- reasoning_effort=reasoning_effort,
94
- max_tokens=int(max_tokens),
95
- temperature=float(temperature),
96
- top_p=float(top_p),
97
- system_prompt=system_prompt_val,
98
- response_format=response_format_val,
99
- )
 
 
 
 
 
 
100
  label_text, parsed, cat_text, reasoning, raw_response = format_test_result(result)
101
  reasoning_visible = bool(reasoning and reasoning.strip())
102
  model_info = format_model_info(model_choice, reasoning_effort)
@@ -112,9 +115,13 @@ def handle_run_test(test_input, current_policy, model_choice, reasoning_effort,
112
  reasoning, reasoning_effort, max_tokens, temperature, top_p,
113
  system_prompt_val, response_format_val
114
  )
115
- save_to_dataset(get_roost_dataset_repo_id(), org_token, data)
 
 
 
 
116
  except Exception as e:
117
- print(f"Failed to save to ROOST dataset: {e}")
118
  elif save_mode == "Save to Private Dataset":
119
  personal_token, _ = get_personal_token(oauth_token)
120
  if personal_token:
@@ -124,9 +131,13 @@ def handle_run_test(test_input, current_policy, model_choice, reasoning_effort,
124
  reasoning, reasoning_effort, max_tokens, temperature, top_p,
125
  system_prompt_val, response_format_val
126
  )
127
- save_to_dataset(get_dataset_repo_id(personal_token), personal_token, data)
 
 
 
 
128
  except Exception as e:
129
- print(f"Failed to save to private dataset: {e}")
130
 
131
  return (
132
  model_info,
@@ -281,7 +292,7 @@ with gr.Blocks(title="Moderation Model Testing") as demo:
281
  try:
282
  idx = dropdown_choices_list.index(selected_label)
283
  if not (0 <= idx < len(cached_examples_list)):
284
- return [None] * 15
285
 
286
  example = cached_examples_list[idx]
287
  policy = example.get("policy", "") or ""
@@ -314,8 +325,10 @@ with gr.Blocks(title="Moderation Model Testing") as demo:
314
  gr.update(value=reasoning_info_text, visible=reasoning_info_visible),
315
  gr.update(value=reasoning_trace or "", visible=reasoning_visible),
316
  )
317
- except (ValueError, IndexError):
318
- return [None] * 15
 
 
319
 
320
  example_dropdown.change(
321
  load_example_from_dataset,
 
69
  """Handle test execution."""
70
 
71
  if not test_input or not test_input.strip():
72
+ raise gr.Error("Please enter test content before running a test.")
 
73
 
74
  if not current_policy or current_policy == "*No policy loaded*":
75
+ raise gr.Error("Please load a policy first. Go to the Policy Definition tab to upload or select a policy.")
 
76
 
77
  # OAuth token is automatically injected by Gradio - we don't pass login_button as input
78
  # Use inference token (org preferred, falls back to personal)
79
  hf_token, _ = get_inference_token(oauth_token)
80
  if hf_token is None:
81
+ raise gr.Error("Please log in or set tokens to use Inference Providers. Check the sidebar for authentication options.")
 
82
 
83
  model_id = extract_model_id(model_choice)
84
 
85
+ try:
86
+ result = run_test(
87
+ model_id=model_id,
88
+ test_input=test_input,
89
+ policy=current_policy,
90
+ hf_token=hf_token,
91
+ reasoning_effort=reasoning_effort,
92
+ max_tokens=int(max_tokens),
93
+ temperature=float(temperature),
94
+ top_p=float(top_p),
95
+ system_prompt=system_prompt_val,
96
+ response_format=response_format_val,
97
+ )
98
+ except gr.Error:
99
+ raise # Re-raise Gradio errors
100
+ except Exception as e:
101
+ raise gr.Error(f"Unexpected error during model inference: {str(e)}. Please try again.")
102
+
103
  label_text, parsed, cat_text, reasoning, raw_response = format_test_result(result)
104
  reasoning_visible = bool(reasoning and reasoning.strip())
105
  model_info = format_model_info(model_choice, reasoning_effort)
 
115
  reasoning, reasoning_effort, max_tokens, temperature, top_p,
116
  system_prompt_val, response_format_val
117
  )
118
+ success, message = save_to_dataset(get_roost_dataset_repo_id(), org_token, data)
119
+ if not success:
120
+ raise gr.Error(f"Failed to save to ROOST dataset: {message}. Please check your token permissions.")
121
+ except gr.Error:
122
+ raise # Re-raise Gradio errors
123
  except Exception as e:
124
+ raise gr.Error(f"Failed to save to ROOST dataset: {str(e)}. Please check your token permissions and try again.")
125
  elif save_mode == "Save to Private Dataset":
126
  personal_token, _ = get_personal_token(oauth_token)
127
  if personal_token:
 
131
  reasoning, reasoning_effort, max_tokens, temperature, top_p,
132
  system_prompt_val, response_format_val
133
  )
134
+ success, message = save_to_dataset(get_dataset_repo_id(personal_token), personal_token, data)
135
+ if not success:
136
+ raise gr.Error(f"Failed to save to private dataset: {message}. Please check your token permissions.")
137
+ except gr.Error:
138
+ raise # Re-raise Gradio errors
139
  except Exception as e:
140
+ raise gr.Error(f"Failed to save to private dataset: {str(e)}. Please check your token permissions and try again.")
141
 
142
  return (
143
  model_info,
 
292
  try:
293
  idx = dropdown_choices_list.index(selected_label)
294
  if not (0 <= idx < len(cached_examples_list)):
295
+ raise gr.Warning("Selected example index is out of range. Please refresh the dataset.")
296
 
297
  example = cached_examples_list[idx]
298
  policy = example.get("policy", "") or ""
 
325
  gr.update(value=reasoning_info_text, visible=reasoning_info_visible),
326
  gr.update(value=reasoning_trace or "", visible=reasoning_visible),
327
  )
328
+ except gr.Warning:
329
+ raise # Re-raise Gradio warnings
330
+ except (ValueError, IndexError) as e:
331
+ raise gr.Warning(f"Failed to load example: {str(e)}. Please try selecting a different example or refresh the dataset.")
332
 
333
  example_dropdown.change(
334
  load_example_from_dataset,
ui/tab_dataset.py CHANGED
@@ -107,7 +107,7 @@ def build_dataset_tab() -> dict:
107
  # Get personal token
108
  personal_token, _ = get_personal_token(oauth_token)
109
  if personal_token is None:
110
- return gr.update(choices=[]), "*Please log in or set personal token to browse private dataset*", [], []
111
  repo_id = get_dataset_repo_id(personal_token)
112
  token = personal_token
113
  else: # roost
@@ -116,9 +116,14 @@ def build_dataset_tab() -> dict:
116
  repo_id = get_roost_dataset_repo_id()
117
  token = org_token # Can be None for public access
118
 
119
- examples, labels = load_dataset_examples(repo_id, token)
120
- if not examples or not labels:
121
- return gr.update(choices=[], value=None), "*No examples found in dataset*", [], []
 
 
 
 
 
122
 
123
  preview = format_preview_markdown(examples[0])
124
  return gr.update(choices=labels, value=labels[0]), preview, examples, labels
 
107
  # Get personal token
108
  personal_token, _ = get_personal_token(oauth_token)
109
  if personal_token is None:
110
+ raise gr.Error("Please log in or set a personal token to browse your private dataset. Check the sidebar for authentication options.")
111
  repo_id = get_dataset_repo_id(personal_token)
112
  token = personal_token
113
  else: # roost
 
116
  repo_id = get_roost_dataset_repo_id()
117
  token = org_token # Can be None for public access
118
 
119
+ try:
120
+ examples, labels = load_dataset_examples(repo_id, token)
121
+ if not examples or not labels:
122
+ raise gr.Error(f"No examples found in dataset '{repo_id}'. Try saving some test results first.")
123
+ except gr.Error:
124
+ raise # Re-raise Gradio errors
125
+ except Exception as e:
126
+ raise gr.Error(f"Failed to load dataset '{repo_id}': {str(e)}. Please check your token permissions and try again.")
127
 
128
  preview = format_preview_markdown(examples[0])
129
  return gr.update(choices=labels, value=labels[0]), preview, examples, labels
ui/tab_policy.py CHANGED
@@ -14,6 +14,7 @@ def build_policy_tab(base_dir: str) -> dict:
14
  """Build the policy definition tab UI."""
15
  with gr.Tab("📋 Policy Definition"):
16
  current_policy_state = gr.State(value="")
 
17
 
18
  # Existing Policy Accordion
19
  with gr.Accordion("📥 Load Existing Policy", open=False):
@@ -45,28 +46,72 @@ def build_policy_tab(base_dir: str) -> dict:
45
  clear_policy_btn = gr.Button("Clear Policy", variant="secondary")
46
 
47
  # Handlers
48
- def load_preset_handler(name):
 
49
  if not name:
50
  return "", "*No policy loaded*", ""
51
- policy_text, _ = load_preset_policy(name, base_dir)
52
- return policy_text, policy_text, policy_text
 
 
 
 
 
 
 
 
 
 
 
53
 
54
  load_preset_btn.click(
55
  load_preset_handler,
56
- inputs=preset_dropdown,
57
  outputs=[current_policy_state, manual_text, policy_preview],
58
  )
59
 
60
- def load_upload_handler(f):
61
- if f:
62
- policy_text, _ = load_policy_from_file(f.name)
63
- return policy_text, policy_text, policy_text
64
- return "", "", "*No policy loaded*"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
  upload_file.change(
67
  load_upload_handler,
68
- inputs=upload_file,
69
- outputs=[current_policy_state, manual_text, policy_preview],
70
  )
71
 
72
  def update_preview(text):
 
14
  """Build the policy definition tab UI."""
15
  with gr.Tab("📋 Policy Definition"):
16
  current_policy_state = gr.State(value="")
17
+ uploaded_policies_state = gr.State(value={}) # Store uploaded policies: {"Uploaded - filename": content}
18
 
19
  # Existing Policy Accordion
20
  with gr.Accordion("📥 Load Existing Policy", open=False):
 
46
  clear_policy_btn = gr.Button("Clear Policy", variant="secondary")
47
 
48
  # Handlers
49
+ def load_preset_handler(name, uploaded_policies):
50
+ """Load policy from preset or uploaded policies."""
51
  if not name:
52
  return "", "*No policy loaded*", ""
53
+
54
+ # Check presets first
55
+ preset_choices = ["Hate Speech Policy", "Violence Policy", "Toxicity Policy"]
56
+ if name in preset_choices:
57
+ policy_text, _ = load_preset_policy(name, base_dir)
58
+ return policy_text, policy_text, policy_text
59
+
60
+ # Check uploaded policies
61
+ if name in uploaded_policies:
62
+ policy_text = uploaded_policies[name]
63
+ return policy_text, policy_text, policy_text
64
+
65
+ return "", "*No policy loaded*", ""
66
 
67
  load_preset_btn.click(
68
  load_preset_handler,
69
+ inputs=[preset_dropdown, uploaded_policies_state],
70
  outputs=[current_policy_state, manual_text, policy_preview],
71
  )
72
 
73
+ def load_upload_handler(f, uploaded_policies):
74
+ """Handle file upload: load policy, store it, and update dropdown."""
75
+ if not f:
76
+ return "", "", "*No policy loaded*", gr.update(), {}
77
+
78
+ # Extract filename
79
+ filename = os.path.basename(f.name)
80
+ upload_key = f"Uploaded - {filename}"
81
+
82
+ # Load policy content
83
+ policy_text, _ = load_policy_from_file(f.name)
84
+
85
+ # Ensure uploaded_policies is a dict (handle case where it might be None)
86
+ if uploaded_policies is None:
87
+ uploaded_policies = {}
88
+
89
+ # Check for duplicate BEFORE storing
90
+ is_duplicate = upload_key in uploaded_policies
91
+
92
+ # Store policy in state (overwrites if duplicate)
93
+ uploaded_policies[upload_key] = policy_text
94
+
95
+ # Build updated choices: presets + uploaded policies
96
+ preset_choices = ["Hate Speech Policy", "Violence Policy", "Toxicity Policy"]
97
+ all_choices = preset_choices + sorted(uploaded_policies.keys())
98
+
99
+ # Show warning if duplicate (gr.Warning is a function, not an exception)
100
+ if is_duplicate:
101
+ gr.Warning(f"Policy '{filename}' already uploaded. Previous version overwritten.")
102
+
103
+ return (
104
+ policy_text, # current_policy_state
105
+ policy_text, # manual_text
106
+ policy_text, # policy_preview
107
+ gr.update(choices=all_choices), # preset_dropdown
108
+ uploaded_policies # uploaded_policies_state
109
+ )
110
 
111
  upload_file.change(
112
  load_upload_handler,
113
+ inputs=[upload_file, uploaded_policies_state],
114
+ outputs=[current_policy_state, manual_text, policy_preview, preset_dropdown, uploaded_policies_state],
115
  )
116
 
117
  def update_preview(text):
utils/helpers.py CHANGED
@@ -162,13 +162,20 @@ def load_preset_policy(preset_name: str, base_dir: str) -> tuple[str, str]:
162
  policy_text = f.read()
163
  return policy_text, policy_text
164
  except FileNotFoundError:
165
- return f"*Error: Policy file {preset_files[preset_name]} not found*", ""
 
 
166
  return "", ""
167
 
168
 
169
  def load_policy_from_file(file_path: str) -> tuple[str, str]:
170
  """Load policy from uploaded file."""
171
- with open(file_path, "r") as f:
172
- content = f.read()
173
- return content, content
 
 
 
 
 
174
 
 
162
  policy_text = f.read()
163
  return policy_text, policy_text
164
  except FileNotFoundError:
165
+ raise gr.Error(f"Policy file '{preset_files[preset_name]}' not found at {policy_path}. Please check the file exists.")
166
+ except Exception as e:
167
+ raise gr.Error(f"Failed to load policy file '{preset_files[preset_name]}': {str(e)}")
168
  return "", ""
169
 
170
 
171
  def load_policy_from_file(file_path: str) -> tuple[str, str]:
172
  """Load policy from uploaded file."""
173
+ try:
174
+ with open(file_path, "r") as f:
175
+ content = f.read()
176
+ return content, content
177
+ except FileNotFoundError:
178
+ raise gr.Error(f"File not found: {file_path}. Please try uploading the file again.")
179
+ except Exception as e:
180
+ raise gr.Error(f"Failed to read policy file: {str(e)}. Please check the file format and try again.")
181
 
utils/model_interface.py CHANGED
@@ -109,22 +109,37 @@ def run_test(
109
  response_format: str = RESPONSE_FORMAT,
110
  ) -> dict:
111
  """Run test on model."""
 
 
112
  model_info = get_model_info(model_id)
113
  if not model_info:
114
- raise ValueError(f"Unknown model: {model_id}")
115
 
116
  client = OpenAI(base_url=ROUTER_URL, api_key=hf_token)
117
  messages = make_messages(test_input, policy, model_id, reasoning_effort, system_prompt, response_format)
118
 
119
- completion = client.chat.completions.create(
120
- model=model_id,
121
- messages=messages,
122
- max_tokens=max_tokens,
123
- temperature=temperature,
124
- top_p=top_p,
125
- stop=None,
126
- extra_headers={"X-HF-Bill-To": "roosttools"},
127
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  result = {"content": completion.choices[0].message.content}
130
 
 
109
  response_format: str = RESPONSE_FORMAT,
110
  ) -> dict:
111
  """Run test on model."""
112
+ import gradio as gr
113
+
114
  model_info = get_model_info(model_id)
115
  if not model_info:
116
+ raise gr.Error(f"Unknown model: {model_id}. Please select a valid model from the dropdown.")
117
 
118
  client = OpenAI(base_url=ROUTER_URL, api_key=hf_token)
119
  messages = make_messages(test_input, policy, model_id, reasoning_effort, system_prompt, response_format)
120
 
121
+ try:
122
+ completion = client.chat.completions.create(
123
+ model=model_id,
124
+ messages=messages,
125
+ max_tokens=max_tokens,
126
+ temperature=temperature,
127
+ top_p=top_p,
128
+ stop=None,
129
+ extra_headers={"X-HF-Bill-To": "roosttools"},
130
+ )
131
+ except Exception as e:
132
+ error_msg = str(e)
133
+ if "401" in error_msg or "authentication" in error_msg.lower():
134
+ raise gr.Error(f"Authentication failed: {error_msg}. Please check your token permissions.")
135
+ elif "400" in error_msg or "bad request" in error_msg.lower():
136
+ raise gr.Error(f"Invalid request: {error_msg}. Please check your input and try again.")
137
+ elif "429" in error_msg or "rate limit" in error_msg.lower():
138
+ raise gr.Error(f"Rate limit exceeded: {error_msg}. Please wait a moment and try again.")
139
+ elif "timeout" in error_msg.lower():
140
+ raise gr.Error(f"Request timed out: {error_msg}. Please try again with a shorter input or lower max_tokens.")
141
+ else:
142
+ raise gr.Error(f"Model inference failed: {error_msg}. Please check your inputs and try again.")
143
 
144
  result = {"content": completion.choices[0].message.content}
145