Update README.md
README.md CHANGED

```diff
@@ -15,16 +15,12 @@ from transformers import Gemma3ForConditionalGeneration
 text_encoder = Gemma3ForConditionalGeneration.from_pretrained(
     repo,
     subfolder="text_encoder",
-    quantization_config=quant_config,
-    torch_dtype=torch.float16,
 )
 
 ### transformer
 transformer_4bit = AutoModel.from_pretrained(
     repo,
     subfolder="transformer",
-    quantization_config=quant_config,
-    torch_dtype=torch.float16,
 )
 pipeline = LTX2Pipeline.from_pretrained("smthem/ltx-2-19b-dev-diffusers-test",transformer=transformer_4bit,text_encoder=text_encoder,torch_dtype=torch.float16,)
 pipeline.enable_model_cpu_offload()
```
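For reference, here is the updated snippet assembled into a self-contained sketch. The hunk starts at line 15 of the README, so the preamble is not shown in this diff; the `repo` value and the import paths for `AutoModel` and `LTX2Pipeline` below are assumptions based on context, not confirmed by this commit.

```python
# Minimal sketch of the updated README snippet (post-commit version).
# Assumptions not visible in this hunk: the import paths for AutoModel /
# LTX2Pipeline and the value of `repo` are inferred from context.
import torch
from transformers import Gemma3ForConditionalGeneration
from diffusers import AutoModel, LTX2Pipeline  # assumed import path

repo = "smthem/ltx-2-19b-dev-diffusers-test"  # assumed: same repo as the pipeline

# Load the text encoder from its subfolder of the checkpoint.
text_encoder = Gemma3ForConditionalGeneration.from_pretrained(
    repo,
    subfolder="text_encoder",
)

# Load the transformer from its subfolder of the checkpoint.
transformer_4bit = AutoModel.from_pretrained(
    repo,
    subfolder="transformer",
)

# Assemble the pipeline around the preloaded components, then offload
# submodules to CPU between forward passes to reduce peak VRAM usage.
pipeline = LTX2Pipeline.from_pretrained(
    "smthem/ltx-2-19b-dev-diffusers-test",
    transformer=transformer_4bit,
    text_encoder=text_encoder,
    torch_dtype=torch.float16,
)
pipeline.enable_model_cpu_offload()
```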