Update evaluation/run_generation_vllm.py #5
by future7 - opened

evaluation/run_generation_vllm.py CHANGED
@@ -69,14 +69,20 @@ def main():
     ## This changes the GPU support to 8
     model_vllm = LLM(model_path, tensor_parallel_size=8)
 
+    input_list = []
     output_list = []
     for prompt in prompt_list:
         prompt = bos_token + prompt
-
+        input_list.append(prompt)
+
+
+    outputs = model_vllm.generate(input_list, sampling_params)[0]
+
+    for output in outputs:
         generated_text = output.outputs[0].text
         generated_text = generated_text.strip().replace("\n", " ")
 
-
+        print("generated_text:", generated_text)
         output_list.append(generated_text)
 
     print("writing to %s" % output_datapath)
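In essence, the PR replaces per-prompt generation inside the loop with a single batched generate() call: the prompts are collected into input_list, generated in one pass, and the results are unpacked afterwards. Below is a minimal runnable sketch of that batched pattern, assuming vLLM's offline LLM API; the model name, prompts, and sampling settings are illustrative stand-ins, not taken from the PR. Note that LLM.generate() returns a list of RequestOutput objects, so the trailing [0] on the generate() call in the diff appears to keep only the first request's output rather than the whole batch; the sketch omits it.

from vllm import LLM, SamplingParams

# Illustrative stand-ins for the script's bos_token and prompt_list.
bos_token = "<s>"
prompt_list = ["What is tensor parallelism?", "Name one use of vLLM."]
sampling_params = SamplingParams(temperature=0.0, max_tokens=64)

# The diff uses tensor_parallel_size=8 to shard the model across 8 GPUs;
# 1 is used here so the sketch runs on a single-GPU machine.
model_vllm = LLM("facebook/opt-125m", tensor_parallel_size=1)

# Collect all prompts first, then issue one batched generate() call,
# letting vLLM schedule the whole batch instead of one request at a time.
input_list = [bos_token + prompt for prompt in prompt_list]
outputs = model_vllm.generate(input_list, sampling_params)  # list[RequestOutput]

output_list = []
for output in outputs:
    generated_text = output.outputs[0].text.strip().replace("\n", " ")
    output_list.append(generated_text)

Batching the prompts this way lets vLLM's scheduler run the requests concurrently, which is typically much faster than calling generate() once per prompt in a Python loop.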