Yacine Jernite committed on
Commit
39e49a6
·
1 Parent(s): caa0a29

more working models

Browse files
Files changed (2) hide show
  1. utils/constants.py +30 -0
  2. utils/model_interface.py +2 -1
utils/constants.py CHANGED
@@ -8,6 +8,18 @@ MODELS = [
8
  "is_thinking": True,
9
  "supports_reasoning_level": True,
10
  },
 
 
 
 
 
 
 
 
 
 
 
 
11
  {
12
  "name": "Qwen3-Next-80B-Instruct",
13
  "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
@@ -20,6 +32,24 @@ MODELS = [
20
  "is_thinking": True,
21
  "supports_reasoning_level": False,
22
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  ]
24
 
25
  UNUSED_MODELS = [
 
8
  "is_thinking": True,
9
  "supports_reasoning_level": True,
10
  },
11
+ {
12
+ "name": "GPT-OSS-20B",
13
+ "id": "openai/gpt-oss-20b",
14
+ "is_thinking": True,
15
+ "supports_reasoning_level": True,
16
+ },
17
+ {
18
+ "name": "GPT-OSS-120B",
19
+ "id": "openai/gpt-oss-120b",
20
+ "is_thinking": True,
21
+ "supports_reasoning_level": True,
22
+ },
23
  {
24
  "name": "Qwen3-Next-80B-Instruct",
25
  "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
 
32
  "is_thinking": True,
33
  "supports_reasoning_level": False,
34
  },
35
+ {
36
+ "name": "Qwen3-4B-Instruct",
37
+ "id": "Qwen/Qwen3-4B-Instruct-2507",
38
+ "is_thinking": False,
39
+ "supports_reasoning_level": False,
40
+ },
41
+ {
42
+ "name": "Gemma-3-12B-Instruct",
43
+ "id": "google/gemma-3-12b-it",
44
+ "is_thinking": False,
45
+ "supports_reasoning_level": False,
46
+ },
47
+ {
48
+ "name": "Gemma-3-27B-Instruct",
49
+ "id": "google/gemma-3-27b-it",
50
+ "is_thinking": False,
51
+ "supports_reasoning_level": False,
52
+ },
53
  ]
54
 
55
  UNUSED_MODELS = [
utils/model_interface.py CHANGED
@@ -62,7 +62,7 @@ def get_default_system_prompt(model_id: str, reasoning_effort: str = "Low") -> s
62
 
63
  def make_messages(test: str, policy: str, model_id: str, reasoning_effort: str = "Low", system_prompt: str | None = None, response_format: str = RESPONSE_FORMAT) -> list[dict]:
64
  """Create messages based on model type."""
65
- if is_gptoss_model(model_id):
66
  # GPT-OSS uses Harmony encoding
67
  enc = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
68
  system_content = SystemContent.new().with_reasoning_effort(reasoning_effort)
@@ -123,6 +123,7 @@ def run_test(
123
  temperature=temperature,
124
  top_p=top_p,
125
  stop=None,
 
126
  )
127
 
128
  result = {"content": completion.choices[0].message.content}
 
62
 
63
  def make_messages(test: str, policy: str, model_id: str, reasoning_effort: str = "Low", system_prompt: str | None = None, response_format: str = RESPONSE_FORMAT) -> list[dict]:
64
  """Create messages based on model type."""
65
+ if model_id.startswith("openai/gpt-oss-safeguard"):
66
  # GPT-OSS uses Harmony encoding
67
  enc = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)
68
  system_content = SystemContent.new().with_reasoning_effort(reasoning_effort)
 
123
  temperature=temperature,
124
  top_p=top_p,
125
  stop=None,
126
+ # extra_headers={"X-HF-Bill-To": "roosttools"},
127
  )
128
 
129
  result = {"content": completion.choices[0].message.content}