Update app.py
app.py CHANGED
@@ -2,7 +2,7 @@ import discord
 import logging
 import os
 import requests
-from huggingface_hub import InferenceClient
+from huggingface_hub import InferenceClient, HfHubHTTPError
 from transformers import pipeline
 import asyncio
 import subprocess
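
Note on the changed import: HfHubHTTPError is pulled from the huggingface_hub package root here; some older huggingface_hub releases only expose the exception class under huggingface_hub.utils, so a defensive import is one way to stay compatible. A minimal sketch, not part of this commit:

try:
    from huggingface_hub import InferenceClient, HfHubHTTPError
except ImportError:
    # Fallback for huggingface_hub versions that only expose the error under utils
    from huggingface_hub import InferenceClient
    from huggingface_hub.utils import HfHubHTTPError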
@@ -73,20 +73,22 @@ class MyClient(discord.Client):
         loop = asyncio.get_event_loop()
 
         # Ask the AI-MO/NuminaMath-7B-TIR model to solve the math problem
-        math_response_future = loop.run_in_executor(None, lambda: self.math_pipe(
-
-        ## math_response_future = loop.run_in_executor(None, lambda: self.math_pipe([{"role": "user", "content": question}]))
+        math_response_future = loop.run_in_executor(None, lambda: self.math_pipe(question, max_new_tokens=2000))
         math_response = await math_response_future
         math_result = math_response[0]['generated_text']
 
-
-
-
+        try:
+            # Ask the Cohere model to translate the AI-MO/NuminaMath-7B-TIR result into Korean
+            cohere_response_future = loop.run_in_executor(None, lambda: hf_client.chat_completion(
+                [{"role": "system", "content": "다음 텍스트를 한글로 번역하십시오: "}, {"role": "user", "content": math_result}], max_tokens=1000))
 
-
-
+            cohere_response = await cohere_response_future
+            cohere_result = ''.join([part.choices[0].delta.content for part in cohere_response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
 
-
+            combined_response = f"수학 선생님 답변: {cohere_result}"
+        except HfHubHTTPError as e:
+            logging.error(f"Hugging Face API error: {e}")
+            combined_response = "An error occurred while processing the request."
 
         return combined_response
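
One detail worth flagging in the block above: the ''.join(...) reads part.choices[0].delta.content, which is the shape of a streamed response, yet this chat_completion call is made without stream=True. A minimal comparison sketch of the two result shapes returned by huggingface_hub's InferenceClient, with a placeholder model id:

from huggingface_hub import InferenceClient

client = InferenceClient("CohereForAI/c4ai-command-r-plus")  # placeholder model id
messages = [{"role": "user", "content": "Translate to Korean: hello"}]

# Non-streaming call: a single object whose text is under choices[0].message.content
result = client.chat_completion(messages, max_tokens=1000)
print(result.choices[0].message.content)

# Streaming call: an iterator of chunks whose text arrives under choices[0].delta.content
text = ''.join(
    part.choices[0].delta.content
    for part in client.chat_completion(messages, max_tokens=1000, stream=True)
    if part.choices and part.choices[0].delta.content
)
print(text)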
@@ -105,10 +107,16 @@ class MyClient(discord.Client):
         """
         conversation_history.append({"role": "user", "content": user_input})
         messages = [{"role": "system", "content": f"{system_prefix}"}] + conversation_history
-
-
-
-
+
+        try:
+            response = await asyncio.get_event_loop().run_in_executor(None, lambda: hf_client.chat_completion(
+                messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
+            full_response = ''.join([part.choices[0].delta.content for part in response if part.choices and part.choices[0].delta and part.choices[0].delta.content])
+            conversation_history.append({"role": "assistant", "content": full_response})
+        except HfHubHTTPError as e:
+            logging.error(f"Hugging Face API error: {e}")
+            full_response = "An error occurred while generating the response."
+
         return f"{user_mention}, {full_response}"
 
     async def send_long_message(self, channel, message):
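
Both changed methods rely on the same pattern: the blocking model calls (the transformers pipeline and hf_client.chat_completion) are handed to the default thread pool via run_in_executor so the Discord event loop stays responsive while the model generates. A stripped-down sketch of that pattern, assuming math_pipe is a text-generation pipeline as in this app:

import asyncio
from transformers import pipeline

# Assumption: the app builds its math pipeline roughly like this
math_pipe = pipeline("text-generation", model="AI-MO/NuminaMath-7B-TIR")

async def solve(question: str) -> str:
    loop = asyncio.get_event_loop()
    # The pipeline call is synchronous; running it in the default executor
    # keeps the event loop free to handle other Discord events in the meantime
    outputs = await loop.run_in_executor(
        None, lambda: math_pipe(question, max_new_tokens=2000))
    return outputs[0]["generated_text"]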