Update agentverse/llms/openai.py
agentverse/llms/openai.py  CHANGED  (+21 -18)
@@ -17,6 +17,9 @@ from . import llm_registry
 from .base import BaseChatModel, BaseCompletionModel, BaseModelArgs
 from .utils.jsonrepair import JsonRepair
 
+openai.api_base = "https://openrouter.ai/api/v1"
+openai.api_key = os.getenv("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")
+
 try:
     import openai
     from openai.error import OpenAIError
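The hunk above points the legacy openai SDK (pre-1.0) at OpenRouter by overriding the module-level api_base and api_key. As committed, the assignments sit above the try/import block and pass the key string itself to os.getenv, which expects an environment-variable name. A minimal sketch of the same configuration, assuming the key is kept in an environment variable (OPENROUTER_API_KEY below is an assumed name, not taken from the diff):

import os

try:
    import openai  # the legacy SDK (< 1.0) exposes module-level api_base / api_key
    from openai.error import OpenAIError
except ImportError:
    openai = None

if openai is not None:
    # Route requests through OpenRouter's OpenAI-compatible endpoint.
    openai.api_base = "https://openrouter.ai/api/v1"
    # os.getenv takes the variable *name*; keep the actual key in the
    # environment (e.g. as a Space secret) rather than in the source.
    openai.api_key = os.getenv("OPENROUTER_API_KEY")

With api_base overridden this way, calls made through openai.ChatCompletion.create go to OpenRouter instead of api.openai.com.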
@@ -44,7 +47,7 @@ else:
 
 
 class OpenAIChatArgs(BaseModelArgs):
-    model: str = Field(default="gpt-3.5-turbo")
+    model: str = Field(default="openai/gpt-3.5-turbo")
     deployment_id: str = Field(default=None)
     max_tokens: int = Field(default=2048)
     temperature: float = Field(default=1.0)
@@ -93,9 +96,9 @@ class OpenAIChatArgs(BaseModelArgs):
 # )
 
 
-@llm_registry.register("gpt-35-turbo")
-@llm_registry.register("gpt-3.5-turbo")
-@llm_registry.register("gpt-4")
+@llm_registry.register("openai/gpt-3.5-turbo")
+@llm_registry.register("openai/gpt-3.5-turbo-16k")
+@llm_registry.register("openai/gpt-4")
 class OpenAIChat(BaseChatModel):
     args: OpenAIChatArgs = Field(default_factory=OpenAIChatArgs)
 
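The decorators above re-register OpenAIChat under OpenRouter-style names, so task configs now have to refer to models as openai/gpt-3.5-turbo, openai/gpt-3.5-turbo-16k, or openai/gpt-4. The registry itself is not part of this diff; the sketch below only illustrates the decorator-registry pattern being relied on, with made-up names (Registry, entries) rather than agentverse's actual llm_registry internals.

from typing import Callable, Dict, Type

class Registry:
    """Illustrative name-to-class registry, not agentverse's real implementation."""

    def __init__(self) -> None:
        self.entries: Dict[str, Type] = {}

    def register(self, name: str) -> Callable[[Type], Type]:
        def decorator(cls: Type) -> Type:
            self.entries[name] = cls  # one class may be registered under several names
            return cls
        return decorator

llm_registry = Registry()

@llm_registry.register("openai/gpt-3.5-turbo")
@llm_registry.register("openai/gpt-4")
class OpenAIChat:
    pass

# A model string from a config, e.g. "openai/gpt-4", resolves to the chat class:
assert llm_registry.entries["openai/gpt-4"] is OpenAIChat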
@@ -294,23 +297,23 @@ class OpenAIChat(BaseChatModel):
 
     def get_spend(self) -> int:
         input_cost_map = {
-            "gpt-3.5-turbo": 0.0015,
-            "gpt-3.5-turbo-16k": 0.003,
-            "gpt-3.5-turbo-0613": 0.0015,
-            "gpt-3.5-turbo-16k-0613": 0.003,
-            "gpt-4": 0.03,
-            "gpt-4-0613": 0.03,
-            "gpt-4-32k": 0.06,
+            "openai/gpt-3.5-turbo": 0.0015,
+            "openai/gpt-3.5-turbo": 0.003,
+            "openai/gpt-4": 0.0015,
+            "openai/gpt-3.5-turbo-16k": 0.003,
+            "openai/gpt-4": 0.03,
+            "openai/gpt-4": 0.03,
+            "openai/gpt-4": 0.06,
         }
 
         output_cost_map = {
-            "gpt-3.5-turbo": 0.002,
-            "gpt-3.5-turbo-16k": 0.004,
-            "gpt-3.5-turbo-0613": 0.002,
-            "gpt-3.5-turbo-16k-0613": 0.004,
-            "gpt-4": 0.06,
-            "gpt-4-0613": 0.06,
-            "gpt-4-32k": 0.12,
+            "openai/gpt-3.5-turbo": 0.002,
+            "openai/gpt-3.5-turbo-16k": 0.004,
+            "openai/gpt-3.5-turbo-16k": 0.002,
+            "openai/gpt-3.5-turbo-16k": 0.004,
+            "openai/gpt-4": 0.06,
+            "openai/gpt-4": 0.06,
+            "openai/gpt-4": 0.12,
         }
 
         model = self.args.model
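In get_spend, both maps hold per-1,000-token prices in USD, and the change only swaps in OpenRouter-style key names. Several keys repeat in the new literals; a Python dict literal keeps only the last value for a repeated key, so for example "openai/gpt-4" ends up at 0.06 in input_cost_map. A rough sketch of how such maps are typically combined with token counts is below; the free-standing function and the token-count parameters are assumptions, since the rest of get_spend is not shown in this diff.

# Sketch only: applying per-1K-token price maps to usage counts.
def estimate_spend(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    input_cost_map = {
        "openai/gpt-3.5-turbo": 0.0015,  # USD per 1K prompt tokens
        "openai/gpt-4": 0.03,
    }
    output_cost_map = {
        "openai/gpt-3.5-turbo": 0.002,   # USD per 1K completion tokens
        "openai/gpt-4": 0.06,
    }
    if model not in input_cost_map or model not in output_cost_map:
        raise ValueError(f"unknown model: {model}")
    return (prompt_tokens / 1000) * input_cost_map[model] \
        + (completion_tokens / 1000) * output_cost_map[model]

# 1,200 prompt tokens and 800 completion tokens on openai/gpt-4:
# 1.2 * 0.03 + 0.8 * 0.06 = 0.084 USD
print(estimate_spend("openai/gpt-4", 1200, 800))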