Increase `max_length` of generated output.
#24
by
bezzam
HF Staff
- opened
README.md
CHANGED
|
@@ -71,7 +71,7 @@ for message in conversation:
|
|
| 71 |
inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
|
| 72 |
inputs.input_ids = inputs.input_ids.to("cuda")
|
| 73 |
|
| 74 |
-
generate_ids = model.generate(**inputs, max_length=256)
|
| 75 |
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 76 |
|
| 77 |
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
|
@@ -119,7 +119,7 @@ for message in conversation:
|
|
| 119 |
inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
|
| 120 |
inputs.input_ids = inputs.input_ids.to("cuda")
|
| 121 |
|
| 122 |
-
generate_ids = model.generate(**inputs, max_length=256)
|
| 123 |
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 124 |
|
| 125 |
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
|
|
|
| 71 |
inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
|
| 72 |
inputs.input_ids = inputs.input_ids.to("cuda")
|
| 73 |
|
| 74 |
+
generate_ids = model.generate(**inputs, max_length=1024)
|
| 75 |
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 76 |
|
| 77 |
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
|
|
|
| 119 |
inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
|
| 120 |
inputs.input_ids = inputs.input_ids.to("cuda")
|
| 121 |
|
| 122 |
+
generate_ids = model.generate(**inputs, max_length=512)
|
| 123 |
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
|
| 124 |
|
| 125 |
response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|