Commit eef0040 · Parent(s): 3a5dbe6
Use gr.update

app.py CHANGED
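
The commit removes the module-level helpers (hide_settings, show_solution, hide_solution) that rebuilt gr.Accordion / gr.Button / gr.Audio components on every call, and instead has each handler return gr.update(...) values wired to the same outputs. A minimal sketch of that pattern, with illustrative component names rather than the ones in app.py:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Accordion("Settings", open=True) as acc:
        btn = gr.Button("Generate a problem")

    # The handler returns plain gr.update(...) values; Gradio applies each one
    # positionally to the matching component in `outputs`, so nothing is re-instantiated.
    btn.click(
        fn=lambda: (gr.update(open=False), gr.update(interactive=False)),
        outputs=[acc, btn],
    )

demo.launch()

Only the properties named in each gr.update call change; the components keep their identity, which is what lets the chained event handlers below toggle the same accordions and buttons repeatedly.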
@@ -15,38 +15,6 @@ llm = LLMManager(config, prompts)
 tts = TTSManager(config)
 stt = STTManager(config)
 
-default_audio_params = {
-    "label": "Record answer",
-    "sources": ["microphone"],
-    "type": "numpy",
-    "waveform_options": {"show_controls": False},
-    "editable": False,
-    "container": False,
-    "show_share_button": False,
-    "streaming": stt.streaming,
-}
-
-
-def hide_settings():
-    init_acc = gr.Accordion("Settings", open=False)
-    start_btn = gr.Button("Generate a problem", interactive=False)
-    return init_acc, start_btn
-
-
-def show_solution():
-    solution_acc = gr.Accordion("Solution", open=True)
-    end_btn = gr.Button("Finish the interview", interactive=True)
-    audio_input = gr.Audio(interactive=True, **default_audio_params)
-    return solution_acc, end_btn, audio_input
-
-
-def hide_solution():
-    solution_acc = gr.Accordion("Solution", open=False)
-    end_btn = gr.Button("Finish the interview", interactive=False)
-    problem_acc = gr.Accordion("Problem statement", open=False)
-    audio_input = gr.Audio(interactive=False, **default_audio_params)
-    return solution_acc, end_btn, problem_acc, audio_input
-
 
 def get_status_color(obj):
     if obj.status:
@@ -89,6 +57,16 @@ with gr.Blocks(title="AI Interviewer") as demo:
     chat_example = gr.Chatbot(
         label="Chat", show_label=False, show_share_button=False, value=[["Candidate message", "Interviewer message"]]
     )
+    default_audio_params = {
+        "label": "Record answer",
+        "sources": ["microphone"],
+        "type": "numpy",
+        "waveform_options": {"show_controls": False},
+        "editable": False,
+        "container": False,
+        "show_share_button": False,
+        "streaming": stt.streaming,
+    }
     audio_input_example = gr.Audio(interactive=True, **default_audio_params)
     gr.Markdown(instruction["models"])
     gr.Markdown(instruction["acknowledgements"])
@@ -157,7 +135,7 @@ with gr.Blocks(title="AI Interviewer") as demo:
     start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).success(
         fn=lambda: True, outputs=[started_coding]
     ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
-        fn=hide_settings, outputs=[init_acc, start_btn]
+        fn=lambda: (gr.update(open=False), gr.update(interactive=False)), outputs=[init_acc, start_btn]
     ).success(
         fn=llm.get_problem,
         inputs=[requirements, difficulty_select, topic_select],
@@ -166,16 +144,18 @@ with gr.Blocks(title="AI Interviewer") as demo:
     ).success(
         fn=llm.init_bot, inputs=[description], outputs=[chat_history]
     ).success(
-        fn=show_solution, outputs=[solution_acc, end_btn, audio_input]
+        fn=lambda: (gr.update(open=True), gr.update(interactive=True), gr.update(interactive=True)),
+        outputs=[solution_acc, end_btn, audio_input],
     )
 
     end_btn.click(
         fn=add_interviewer_message(fixed_messages["end"]),
         inputs=[chat],
         outputs=[chat],
+    ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
+        fn=lambda: (gr.update(open=False), gr.update(interactive=False), gr.update(open=False), gr.update(interactive=False)),
+        outputs=[solution_acc, end_btn, problem_acc, audio_input],
     ).success(
-        fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]
-    ).success(fn=hide_solution, outputs=[solution_acc, end_btn, problem_acc, audio_input]).success(
         fn=llm.end_interview, inputs=[description, chat_history], outputs=[feedback]
     )
 
@@ -184,7 +164,7 @@ with gr.Blocks(title="AI Interviewer") as demo:
         inputs=[code, previous_code, chat_history, chat],
         outputs=[chat_history, chat, previous_code],
     ).success(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output]).success(
-        fn=lambda: gr.
+        fn=lambda: gr.update(interactive=False), outputs=[send_btn]
    ).success(
         fn=lambda: np.array([], dtype=np.int16), outputs=[audio_buffer]
     ).success(
@@ -198,10 +178,10 @@ with gr.Blocks(title="AI Interviewer") as demo:
             outputs=[transcript, audio_buffer, message],
             show_progress="hidden",
         )
-        audio_input.stop_recording(fn=lambda: gr.
+        audio_input.stop_recording(fn=lambda: gr.update(interactive=True), outputs=[send_btn])
     else:
         audio_input.stop_recording(fn=stt.speech_to_text_full, inputs=[audio_input], outputs=[message]).success(
-            fn=lambda: gr.
+            fn=lambda: gr.update(interactive=True), outputs=[send_btn]
         ).success(fn=lambda: None, outputs=[audio_input])
 
 demo.launch(show_api=False)
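
For the audio widgets, the diff moves default_audio_params inside the Blocks context and unpacks it with ** into each gr.Audio call, so the example recorder and the real input share one set of keyword arguments. A small self-contained sketch of that idiom, with the keys copied from the diff and the stt-dependent "streaming" and "waveform_options" entries omitted:

import gradio as gr

# One place to keep the shared recorder settings (keys taken from the diff above).
default_audio_params = {
    "label": "Record answer",
    "sources": ["microphone"],
    "type": "numpy",
    "editable": False,
    "container": False,
    "show_share_button": False,
}

with gr.Blocks() as demo:
    # Both components get identical settings; only `interactive` differs.
    audio_input_example = gr.Audio(interactive=True, **default_audio_params)
    audio_input = gr.Audio(interactive=False, **default_audio_params)

demo.launch()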