img to rgb
README.md CHANGED
@@ -67,17 +67,19 @@ from huggingface_hub import hf_hub_download
 
 # This is an example image we provide
 path = hf_hub_download(repo_id="StonyBrook-CVLab/PixCell-256", filename="test_image.png")
-image = Image.open(path)
+image = Image.open(path).convert("RGB")
 
 # Extract UNI embedding from the image
 uni_inp = transform(image).unsqueeze(dim=0)
 with torch.inference_mode():
     uni_emb = uni_model(uni_inp.to(device))
 
+# reshape UNI to (bs, 1, D)
+uni_emb = uni_emb.unsqueeze(1)
 print("Extracted UNI:", uni_emb.shape)
 
 # Get unconditional embedding for classifier-free guidance
 uncond = pipeline.get_unconditional_embedding(uni_emb.shape[0])
 # Generate new samples
-samples = pipeline(uni_embeds=uni_emb, negative_uni_embeds=uncond, guidance_scale=3., num_images_per_prompt=1)
+samples = pipeline(uni_embeds=uni_emb, negative_uni_embeds=uncond, guidance_scale=3., num_images_per_prompt=1).images
 ```
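With `.images` appended, the pipeline call now yields the generated images themselves rather than the raw pipeline output object. A minimal follow-up sketch, assuming the usual diffusers convention that `samples` is a list of PIL images (the output filename below is only illustrative):

```python
# Assumption: `samples` is a list of PIL.Image.Image objects once `.images`
# is taken from the pipeline output, per the standard diffusers convention.
for i, img in enumerate(samples):
    img.save(f"pixcell_sample_{i}.png")  # illustrative output filename
```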