Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -141,7 +141,7 @@ def generate_response(instruction, model_path, progress=gr.Progress()):
     except Exception as e:
         return f"Error generating response: {str(e)}"
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=200)
 def judge_responses(instruction, response1, response2, model_name, temperature=0.1, max_new_tokens=2048):
     """
     Evaluate the quality of two responses
@@ -216,7 +216,7 @@ def judge_responses(instruction, response1, response2, model_name, temperature=0
 
         yield result
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=200)
 def generate_and_judge(instruction, model_dropdown_1, custom_model_1, model_dropdown_2, custom_model_2, judge_model_name, temperature=0.1, max_new_tokens=2048, progress=gr.Progress()):
     """Generate responses from two models and judge them"""
     progress(0, desc="Starting generation process")
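For context, @spaces.GPU(duration=...) is the decorator from Hugging Face's spaces package that requests a ZeroGPU allocation for the decorated function, with duration giving the requested GPU time in seconds per call; this commit sets it to 200 seconds for both judge_responses and generate_and_judge. A minimal sketch of the decorator in use (the function name and body below are placeholders for illustration, not code from this app):

import spaces
import torch

@spaces.GPU(duration=200)  # request a ZeroGPU slot for up to 200 seconds per call
def run_inference(prompt: str) -> str:
    # Placeholder body: the real app loads a model and generates a response here.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"[{device}] {prompt}"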