Spaces:
Paused
Paused
fix the default adapter model
Browse files
llama_lora/ui/inference_ui.py
CHANGED
|
@@ -229,7 +229,7 @@ def reload_selections(current_lora_model, current_prompt_template):
|
|
| 229 |
current_prompt_template = current_prompt_template or next(
|
| 230 |
iter(available_template_names_with_none), None)
|
| 231 |
|
| 232 |
-
default_lora_models = ["winglian/llama-adapter"]
|
| 233 |
available_lora_models = default_lora_models + get_available_lora_model_names()
|
| 234 |
available_lora_models = available_lora_models + ["None"]
|
| 235 |
|
|
@@ -303,7 +303,7 @@ def inference_ui():
|
|
| 303 |
lora_model = gr.Dropdown(
|
| 304 |
label="LoRA Model",
|
| 305 |
elem_id="inference_lora_model",
|
| 306 |
-
value="winglian/llama-adapter",
|
| 307 |
allow_custom_value=True,
|
| 308 |
)
|
| 309 |
prompt_template = gr.Dropdown(
|
|
|
|
| 229 |
current_prompt_template = current_prompt_template or next(
|
| 230 |
iter(available_template_names_with_none), None)
|
| 231 |
|
| 232 |
+
default_lora_models = ["winglian/llama-adapter-7b"]
|
| 233 |
available_lora_models = default_lora_models + get_available_lora_model_names()
|
| 234 |
available_lora_models = available_lora_models + ["None"]
|
| 235 |
|
|
|
|
| 303 |
lora_model = gr.Dropdown(
|
| 304 |
label="LoRA Model",
|
| 305 |
elem_id="inference_lora_model",
|
| 306 |
+
value="winglian/llama-adapter-7b",
|
| 307 |
allow_custom_value=True,
|
| 308 |
)
|
| 309 |
prompt_template = gr.Dropdown(
|