Update README.md
README.md CHANGED
@@ -84,7 +84,7 @@ model = HunYuanVLForConditionalGeneration.from_pretrained(
 with torch.no_grad():
     device = next(model.parameters()).device
     inputs = inputs.to(device)
-    generated_ids = model.generate(**inputs, max_new_tokens=
+    generated_ids = model.generate(**inputs, max_new_tokens=16384, do_sample=False)
     if "input_ids" in inputs:
         input_ids = inputs.input_ids
     else:
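For context, a minimal sketch of how the output of the updated `generate()` call is typically decoded. The trimming and `batch_decode` lines below are illustrative assumptions, not part of this diff, and `processor` stands for the `AutoProcessor` instance used elsewhere in the README.

```python
# Sketch only: decoding the result of the generate() call shown above.
# `processor` is assumed to be AutoProcessor.from_pretrained("tencent/HunyuanOCR").
generated_ids = model.generate(**inputs, max_new_tokens=16384, do_sample=False)
input_ids = inputs.input_ids  # the "input_ids" branch from the hunk above

# Drop the prompt tokens so only newly generated tokens are decoded.
trimmed = [out[len(inp):] for inp, out in zip(input_ids, generated_ids)]
output_text = processor.batch_decode(trimmed, skip_special_tokens=True)
print(output_text[0])
```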
@@ -114,9 +114,9 @@ from PIL import Image
 from transformers import AutoProcessor
 
 model_path = "tencent/HunyuanOCR"
-llm = LLM(model=model_path
+llm = LLM(model=model_path)
 processor = AutoProcessor.from_pretrained(model_path)
-sampling_params = SamplingParams(temperature=
+sampling_params = SamplingParams(temperature=1, max_tokens=16384)
 
 img_path = "/path/to/image.jpg"
 img = Image.open(img_path)
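A hedged sketch of how the updated `LLM` and `SamplingParams` objects might be used to run OCR on `img`. The chat-template messages, the prompt text, and the `llm.generate(...)` call below follow the usual vLLM multimodal pattern and are assumptions, not lines taken from this README.

```python
# Sketch only: the prompt text and message layout are illustrative assumptions.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Recognize the text in this image."},
        ],
    }
]
prompt = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": img}},
    sampling_params=sampling_params,
)
print(outputs[0].outputs[0].text)
```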