Commit 8d3b67a · 1 Parent(s): d6cd6c2

Added streaming to problem generation

Files changed:
- api/llm.py (+16 -10)
- app.py (+3 -1)
api/llm.py CHANGED

@@ -21,8 +21,10 @@ class LLMManager:

         if self.streaming:
             self.end_interview = self.end_interview_stream
+            self.get_problem = self.get_problem_stream
         else:
             self.end_interview = self.end_interview_full
+            self.get_problem = self.get_problem_full

     def text_processor(self):
         def ans_full(response):

@@ -92,7 +94,7 @@ class LLMManager:
             {"role": "system", "content": f"The candidate is solving the following problem: {problem}"},
         ]

-    def …  (rest of the old definition was not captured in this view)
+    def get_problem_prepare_messages(self, requirements, difficulty, topic):
         full_prompt = (
             f"Create a {difficulty} {topic} coding problem. "
             f"Additional requirements: {requirements}. "

@@ -103,15 +105,20 @@ class LLMManager:
         if self.is_demo:
             full_prompt += f" Keep your response very short and simple, no more than {self.demo_word_limit} words."

-        …  (removed lines of the old implementation were not captured in this view)
+        messages = [
+            {"role": "system", "content": self.prompts["problem_generation_prompt"]},
+            {"role": "user", "content": full_prompt},
+        ]
+
+        return messages
+
+    def get_problem_full(self, requirements, difficulty, topic):
+        messages = self.get_problem_prepare_messages(requirements, difficulty, topic)
+        return self.get_text(messages)

-        …  (more removed lines, not captured in this view)
+    def get_problem_stream(self, requirements, difficulty, topic):
+        messages = self.get_problem_prepare_messages(requirements, difficulty, topic)
+        yield from self.get_text_stream(messages)

     def send_request(self, code, previous_code, message, chat_history, chat_display):
         if code != previous_code:

@@ -128,7 +135,6 @@ class LLMManager:

         return chat_history, chat_display, "", code

-    # TODO: implement both streaming and non-streaming versions
     def end_interview_prepare_messages(self, problem_description, chat_history):
         transcript = [f"{message['role'].capitalize()}: {message['content']}" for message in chat_history[1:]]
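The new get_problem_full / get_problem_stream pair delegates to self.get_text and self.get_text_stream, which are not part of this commit. As a rough sketch only (assuming an OpenAI-style chat-completions client and a placeholder model name, neither of which is confirmed by this diff), those helpers could look along these lines:

# Illustrative sketch only; not the repository's actual implementation.
# Assumes the openai>=1.0 client and that OPENAI_API_KEY is set in the environment.
from openai import OpenAI

client = OpenAI()

def get_text(messages, model="gpt-3.5-turbo"):
    # Non-streaming path (get_problem_full): return the whole completion at once.
    response = client.chat.completions.create(model=model, messages=messages)
    return response.choices[0].message.content

def get_text_stream(messages, model="gpt-3.5-turbo"):
    # Streaming path (get_problem_stream): yield the accumulated text chunk by chunk,
    # which is the shape a Gradio output component expects from a generator.
    response = client.chat.completions.create(model=model, messages=messages, stream=True)
    text = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if delta:
            text += delta
            yield text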
app.py CHANGED

@@ -146,8 +146,10 @@ with gr.Blocks(title="AI Interviewer") as demo:
     ).then(fn=hide_settings, outputs=[init_acc, start_btn]).then(
         fn=llm.get_problem,
         inputs=[requirements, difficulty_select, topic_select],
-        outputs=[description …  (rest of the old line was not captured in this view)
+        outputs=[description],
         scroll_to_output=True,
+    ).then(
+        fn=llm.init_bot, inputs=[description], outputs=[chat_history]
     ).then(
         fn=show_solution, outputs=[solution_acc, end_btn, audio_input]
     )
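On the app side, binding llm.get_problem to a generator is what makes the problem description stream: Gradio treats a generator passed as fn to an event step (here, a .then() in the chain) as a streaming function and re-renders the output component with every yielded value. A minimal, self-contained illustration of that behaviour, using hypothetical component and function names rather than the app's own:

# Illustrative sketch only; names here are placeholders, not the app's variables.
import time
import gradio as gr

def fake_problem_stream():
    # Stand-in for a streaming LLM call: yield the text accumulated so far.
    text = ""
    for word in "Write a function that reverses a singly linked list.".split():
        text += word + " "
        time.sleep(0.1)
        yield text  # each yield updates the output component in place

with gr.Blocks() as demo:
    start = gr.Button("Generate problem")
    problem_box = gr.Markdown()
    start.click(fn=fake_problem_stream, outputs=[problem_box])

demo.launch()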