Spaces:
Sleeping
Sleeping
fixes pad token id issue
Browse files
app.py
CHANGED
|
@@ -106,7 +106,11 @@ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloa
|
|
| 106 |
|
| 107 |
# Set generation config
|
| 108 |
model.generation_config = GenerationConfig.from_pretrained(model_name)
|
| 109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
model.generation_config.do_sample = True
|
| 111 |
model.generation_config.temperature = 0.6
|
| 112 |
model.generation_config.top_p = 0.95
|
|
|
|
# Set generation config
model.generation_config = GenerationConfig.from_pretrained(model_name)

# Ensure pad_token_id is a plain integer, not a list, so generate() does not
# choke on a multi-valued eos_token_id being reused as the pad token.
# eos_token_id may be an int, a (possibly empty) list of ints, or None
# depending on the checkpoint — only borrow it when a usable id exists,
# otherwise leave pad_token_id as loaded rather than clobbering it with None.
eos_ids = model.generation_config.eos_token_id
if isinstance(eos_ids, (list, tuple)):
    if eos_ids:  # non-empty: first entry is the canonical eos id
        model.generation_config.pad_token_id = eos_ids[0]
elif eos_ids is not None:
    model.generation_config.pad_token_id = eos_ids

# Sampling defaults for this app.
model.generation_config.do_sample = True
model.generation_config.temperature = 0.6
model.generation_config.top_p = 0.95
|