Update app.py
app.py CHANGED

@@ -11,7 +11,10 @@ client = AsyncInferenceClient(api_url)
 
 
 system_message = """
+### Instruction:
 Refactor the provided Python code to improve its maintainability and efficiency and reduce complexity. Include the refactored code along with the comments on the changes made for improving the metrics.
+### Input:
+
 """
 title = "Python Refactoring"
 description = """
@@ -87,12 +90,34 @@ def analyze_sales_data(sales_records):
 """]]
 
 
-#
-def
+# Stream text - stream tokens with InferenceClient from TGI
+async def predict(message, chatbot, temperature=0.9, max_new_tokens=4096, top_p=0.6, repetition_penalty=1.0,):
+
+    if system_prompt != "":
+        input_prompt = f"{system_prompt}"
+
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+
+    input_prompt = input_prompt + str(message) + " [/INST] "
+
     partial_message = ""
-    for token in client.text_generation(
-
+    async for token in await client.text_generation(prompt=input_prompt,
+                                                    max_new_tokens=max_new_tokens,
+                                                    stream=True,
+                                                    best_of=1,
+                                                    temperature=temperature,
+                                                    top_p=top_p,
+                                                    do_sample=True,
+                                                    repetition_penalty=repetition_penalty):
+        partial_message = partial_message + token
         yield partial_message
+
+
+
 
 gr.ChatInterface(
     inference,
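For reference, below is a minimal, self-contained sketch of the streaming pattern this commit introduces: with stream=True, huggingface_hub's AsyncInferenceClient.text_generation returns an async iterator of tokens, and each yield lets gr.ChatInterface progressively redraw the reply. This is not the Space's full app.py; the endpoint URL, the abbreviated system prompt, and the default sampling values are placeholders.

# Hedged sketch: stream tokens from a TGI endpoint into a Gradio chat UI.
import gradio as gr
from huggingface_hub import AsyncInferenceClient

api_url = "https://<your-tgi-endpoint>"  # placeholder, not the Space's endpoint
client = AsyncInferenceClient(api_url)

system_prompt = "Refactor the provided Python code ..."  # abbreviated placeholder

async def predict(message, history, temperature=0.9, max_new_tokens=4096,
                  top_p=0.6, repetition_penalty=1.0):
    # Clamp temperature so the server does not reject a zero value.
    temperature = max(float(temperature), 1e-2)

    # Assemble the prompt in the instruction style used by the diff.
    input_prompt = f"{system_prompt}{message} [/INST] "

    partial_message = ""
    # With stream=True the async client yields generated tokens one by one.
    async for token in await client.text_generation(
        prompt=input_prompt,
        max_new_tokens=max_new_tokens,
        stream=True,
        best_of=1,
        temperature=temperature,
        top_p=float(top_p),
        do_sample=True,
        repetition_penalty=repetition_penalty,
    ):
        partial_message += token
        yield partial_message  # each yield refreshes the partial reply in the UI

gr.ChatInterface(predict, title="Python Refactoring").queue().launch()

If the extra sampling parameters should be user-tunable rather than fixed defaults, they can be exposed through ChatInterface's additional_inputs (for example gr.Slider components); the sketch above keeps them as plain keyword defaults for brevity.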