Spaces · Running on Zero
rizavelioglu committed · Commit eea9702 · Parent(s): 16ab8a3
add new AEs: Flux-2 and fal/TAE
app.py CHANGED
@@ -65,6 +65,8 @@ class VAETester:
             "playground-v2.5": AutoencoderKL.from_pretrained("playgroundai/playground-v2.5-1024px-aesthetic", subfolder="vae").to(self.device),
             # "dc-ae-f32c32-sana-1.0": AutoencoderDC.from_pretrained("mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers").to(self.device),
             "FLUX.1-Kontext": AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", subfolder="vae").to(self.device),
+            "FLUX.2": AutoencoderKL.from_pretrained("black-forest-labs/FLUX.2-dev", subfolder="vae").to(self.device),
+            "FLUX.2-TinyAutoEncoder": AutoencoderKL.from_pretrained("fal/FLUX.2-Tiny-AutoEncoder").to(self.device),
         }
         # Define the desired order of models
         order = [
@@ -82,6 +84,8 @@ class VAETester:
             "CogView4-6B",
             # "dc-ae-f32c32-sana-1.0",
             "FLUX.1-Kontext",
+            "FLUX.2",
+            "FLUX.2-TinyAutoEncoder",
         ]
 
         # Construct the vae_models dictionary in the specified order
@@ -170,9 +174,20 @@ def test_all_vaes(image_path: str, tolerance: float, img_size: int):
         return [None], [None], error_msg
 
 examples = [f"examples/{img_filename}" for img_filename in sorted(os.listdir("examples/"))]
-
-
-
+custom_css = """
+.center-header {
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    margin: 0 0 10px 0;
+}
+.monospace-text {
+    font-family: 'Courier New', Courier, monospace;
+}
+"""
+
+with gr.Blocks(title="VAE Performance Tester", css=custom_css) as demo:
+    gr.Markdown("<div class='center-header'><h1>VAE Comparison Tool</h1></div>")
     gr.Markdown("""
     Upload an image or select an example to compare how different VAEs reconstruct it.
     1. The image is padded to a square and resized to the selected size (512 or 1024 pixels).