Spaces:
Running
Running
github-actions[bot]
committed on
Commit
·
930dd4f
1
Parent(s):
720bedd
Auto-sync from demo at Thu Nov 20 11:09:35 UTC 2025
Browse files- app.py +2 -2
- graphgen/models/llm/api/openai_client.py +4 -4
- webui/app.py +2 -2
app.py
CHANGED
|
@@ -42,7 +42,7 @@ def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
|
| 42 |
|
| 43 |
tokenizer_instance = Tokenizer(config.get("tokenizer", "cl100k_base"))
|
| 44 |
synthesizer_llm_client = OpenAIClient(
|
| 45 |
-
|
| 46 |
base_url=env.get("SYNTHESIZER_BASE_URL", ""),
|
| 47 |
api_key=env.get("SYNTHESIZER_API_KEY", ""),
|
| 48 |
request_limit=True,
|
|
@@ -51,7 +51,7 @@ def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
|
| 51 |
tokenizer=tokenizer_instance,
|
| 52 |
)
|
| 53 |
trainee_llm_client = OpenAIClient(
|
| 54 |
-
|
| 55 |
base_url=env.get("TRAINEE_BASE_URL", ""),
|
| 56 |
api_key=env.get("TRAINEE_API_KEY", ""),
|
| 57 |
request_limit=True,
|
|
|
|
| 42 |
|
| 43 |
tokenizer_instance = Tokenizer(config.get("tokenizer", "cl100k_base"))
|
| 44 |
synthesizer_llm_client = OpenAIClient(
|
| 45 |
+
model=env.get("SYNTHESIZER_MODEL", ""),
|
| 46 |
base_url=env.get("SYNTHESIZER_BASE_URL", ""),
|
| 47 |
api_key=env.get("SYNTHESIZER_API_KEY", ""),
|
| 48 |
request_limit=True,
|
|
|
|
| 51 |
tokenizer=tokenizer_instance,
|
| 52 |
)
|
| 53 |
trainee_llm_client = OpenAIClient(
|
| 54 |
+
model=env.get("TRAINEE_MODEL", ""),
|
| 55 |
base_url=env.get("TRAINEE_BASE_URL", ""),
|
| 56 |
api_key=env.get("TRAINEE_API_KEY", ""),
|
| 57 |
request_limit=True,
|
graphgen/models/llm/api/openai_client.py
CHANGED
|
@@ -32,7 +32,7 @@ class OpenAIClient(BaseLLMWrapper):
|
|
| 32 |
def __init__(
|
| 33 |
self,
|
| 34 |
*,
|
| 35 |
-
|
| 36 |
api_key: Optional[str] = None,
|
| 37 |
base_url: Optional[str] = None,
|
| 38 |
json_mode: bool = False,
|
|
@@ -44,7 +44,7 @@ class OpenAIClient(BaseLLMWrapper):
|
|
| 44 |
**kwargs: Any,
|
| 45 |
):
|
| 46 |
super().__init__(**kwargs)
|
| 47 |
-
self.
|
| 48 |
self.api_key = api_key
|
| 49 |
self.base_url = base_url
|
| 50 |
self.json_mode = json_mode
|
|
@@ -109,7 +109,7 @@ class OpenAIClient(BaseLLMWrapper):
|
|
| 109 |
kwargs["max_tokens"] = 1
|
| 110 |
|
| 111 |
completion = await self.client.chat.completions.create( # pylint: disable=E1125
|
| 112 |
-
model=self.
|
| 113 |
)
|
| 114 |
|
| 115 |
tokens = get_top_response_tokens(completion)
|
|
@@ -141,7 +141,7 @@ class OpenAIClient(BaseLLMWrapper):
|
|
| 141 |
await self.tpm.wait(estimated_tokens, silent=True)
|
| 142 |
|
| 143 |
completion = await self.client.chat.completions.create( # pylint: disable=E1125
|
| 144 |
-
model=self.
|
| 145 |
)
|
| 146 |
if hasattr(completion, "usage"):
|
| 147 |
self.token_usage.append(
|
|
|
|
| 32 |
def __init__(
|
| 33 |
self,
|
| 34 |
*,
|
| 35 |
+
model: str = "gpt-4o-mini",
|
| 36 |
api_key: Optional[str] = None,
|
| 37 |
base_url: Optional[str] = None,
|
| 38 |
json_mode: bool = False,
|
|
|
|
| 44 |
**kwargs: Any,
|
| 45 |
):
|
| 46 |
super().__init__(**kwargs)
|
| 47 |
+
self.model = model
|
| 48 |
self.api_key = api_key
|
| 49 |
self.base_url = base_url
|
| 50 |
self.json_mode = json_mode
|
|
|
|
| 109 |
kwargs["max_tokens"] = 1
|
| 110 |
|
| 111 |
completion = await self.client.chat.completions.create( # pylint: disable=E1125
|
| 112 |
+
model=self.model, **kwargs
|
| 113 |
)
|
| 114 |
|
| 115 |
tokens = get_top_response_tokens(completion)
|
|
|
|
| 141 |
await self.tpm.wait(estimated_tokens, silent=True)
|
| 142 |
|
| 143 |
completion = await self.client.chat.completions.create( # pylint: disable=E1125
|
| 144 |
+
model=self.model, **kwargs
|
| 145 |
)
|
| 146 |
if hasattr(completion, "usage"):
|
| 147 |
self.token_usage.append(
|
webui/app.py
CHANGED
|
@@ -42,7 +42,7 @@ def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
|
| 42 |
|
| 43 |
tokenizer_instance = Tokenizer(config.get("tokenizer", "cl100k_base"))
|
| 44 |
synthesizer_llm_client = OpenAIClient(
|
| 45 |
-
|
| 46 |
base_url=env.get("SYNTHESIZER_BASE_URL", ""),
|
| 47 |
api_key=env.get("SYNTHESIZER_API_KEY", ""),
|
| 48 |
request_limit=True,
|
|
@@ -51,7 +51,7 @@ def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
|
| 51 |
tokenizer=tokenizer_instance,
|
| 52 |
)
|
| 53 |
trainee_llm_client = OpenAIClient(
|
| 54 |
-
|
| 55 |
base_url=env.get("TRAINEE_BASE_URL", ""),
|
| 56 |
api_key=env.get("TRAINEE_API_KEY", ""),
|
| 57 |
request_limit=True,
|
|
|
|
| 42 |
|
| 43 |
tokenizer_instance = Tokenizer(config.get("tokenizer", "cl100k_base"))
|
| 44 |
synthesizer_llm_client = OpenAIClient(
|
| 45 |
+
model=env.get("SYNTHESIZER_MODEL", ""),
|
| 46 |
base_url=env.get("SYNTHESIZER_BASE_URL", ""),
|
| 47 |
api_key=env.get("SYNTHESIZER_API_KEY", ""),
|
| 48 |
request_limit=True,
|
|
|
|
| 51 |
tokenizer=tokenizer_instance,
|
| 52 |
)
|
| 53 |
trainee_llm_client = OpenAIClient(
|
| 54 |
+
model=env.get("TRAINEE_MODEL", ""),
|
| 55 |
base_url=env.get("TRAINEE_BASE_URL", ""),
|
| 56 |
api_key=env.get("TRAINEE_API_KEY", ""),
|
| 57 |
request_limit=True,
|