Update app.py
app.py CHANGED

@@ -216,15 +216,26 @@ def stream_iberotales_response(
     conversation.append({"role": "user", "content": user_message})
 
     prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
-
-
-
-
-
-
-
-
-
+    if isinstance(model, Llama):
+        response = model.create_completion(
+            prompt=prompt,
+            max_tokens=max_new_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            repeat_penalty=repetition_penalty,
+            stream=True
+        )
+    else:
+        response = model(
+            prompt,
+            max_tokens=max_new_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            repetition_penalty=repetition_penalty,
+            stream=True
+        )
 
     full_response = ""
     messages = []
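
For context, the branch added above dispatches on the backend type: llama-cpp-python's Llama exposes create_completion with a repeat_penalty argument, while the fallback path calls the model object directly with repetition_penalty. A minimal sketch of how a stream like this is typically consumed downstream (where full_response is accumulated), assuming the llama-cpp-python API; the model path and prompt are placeholders, not from the commit:

    # Sketch only: model path and prompt are illustrative placeholders.
    from llama_cpp import Llama

    model = Llama(model_path="model.gguf")

    response = model.create_completion(
        prompt="Érase una vez",
        max_tokens=64,
        temperature=0.7,
        top_p=0.9,
        top_k=40,
        repeat_penalty=1.1,
        stream=True,
    )

    full_response = ""
    for chunk in response:
        # With stream=True, each chunk is a dict carrying the next text fragment.
        full_response += chunk["choices"][0]["text"]
    print(full_response)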

@@ -382,7 +393,7 @@ custom_css = """
 """
 
 # Crear la interfaz
-with gr.Blocks(
+with gr.Blocks(title="Iberotales", css=custom_css) as demo:
     # Header con información del proyecto
     with gr.Row():
         with gr.Column():
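
The replaced line completes what the diff shows as a bare "with gr.Blocks(", passing the page title and the custom_css string defined earlier in app.py. A minimal standalone sketch of that Gradio pattern, with placeholder CSS and header content (only the title and css arguments come from the commit):

    # Sketch only: the CSS string and Markdown header are illustrative placeholders.
    import gradio as gr

    custom_css = ".gradio-container { max-width: 900px; }"

    with gr.Blocks(title="Iberotales", css=custom_css) as demo:
        # Header con información del proyecto (header with project info)
        with gr.Row():
            with gr.Column():
                gr.Markdown("# Iberotales")

    demo.launch()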