weathon
committed on
Commit
·
1371493
1
Parent(s):
f88e432
compare
Browse files- .gradio/certificate.pem +31 -0
- app.py +43 -16
.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
app.py
CHANGED
|
@@ -27,12 +27,21 @@ pipe = VSFStableDiffusion3Pipeline.from_pretrained(
|
|
| 27 |
torch_dtype=torch.bfloat16,
|
| 28 |
hf_token=os.environ.get("HF_TOKEN", None)
|
| 29 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
import os
|
| 31 |
@spaces.GPU
|
| 32 |
-
def generate_video(positive_prompt, negative_prompt, guidance_scale, bias, step, seed, progress=gr.Progress(track_tqdm=False)):
|
| 33 |
-
global pipe
|
| 34 |
lambda total: progress.tqdm(range(total))
|
| 35 |
-
|
| 36 |
print(f"Generating image with params: {positive_prompt}, {negative_prompt}, {guidance_scale}, {bias}, {step}")
|
| 37 |
|
| 38 |
output = pipe(
|
|
@@ -46,19 +55,25 @@ def generate_video(positive_prompt, negative_prompt, guidance_scale, bias, step,
|
|
| 46 |
path = f"images/{uuid.uuid4().hex}.png"
|
| 47 |
output.save(path)
|
| 48 |
output_path = path
|
| 49 |
-
with open(output_path.replace(".png", ".txt"), "w") as f:
|
| 50 |
-
f.write(f"Positive Prompt: {positive_prompt}\n")
|
| 51 |
-
f.write(f"Negative Prompt: {negative_prompt}\n")
|
| 52 |
-
f.write(f"Guidance Scale: {guidance_scale}\n")
|
| 53 |
-
f.write(f"Bias: {bias}\n")
|
| 54 |
-
f.write(f"Steps: {step}\n")
|
| 55 |
-
f.write(f"Seed: {seed}\n")
|
| 56 |
print(f"Image saved to {output_path}")
|
| 57 |
-
return output_path
|
| 58 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
|
| 60 |
with gr.Blocks(title="Value Sign Flip Wan 2.1 Demo") as demo:
|
| 61 |
-
gr.Markdown("# Value Sign Flip Wan 2.1 Demo \n\n This demo is based on SD3.5-L-Turbo model and uses Value Sign Flip technique to generate videos with different guidance scales and biases. More on [GitHub](https://github.com/weathon/VSF/blob/main/wan.md)\n\nPositive prompt should be at least
|
| 62 |
# gr.Markdown("# Value Sign Flip Wan 2.1 Demo \n\n This demo is based on Wan 2.1 T2V model and uses Value Sign Flip technique to generate videos with different guidance scales and biases. More on [GitHub](https://github.com/weathon/VSF/blob/main/wan.md)\n\nPositive prompt should be at least 2 sentence long or the results will be weird.")
|
| 63 |
|
| 64 |
with gr.Row():
|
|
@@ -66,14 +81,26 @@ with gr.Blocks(title="Value Sign Flip Wan 2.1 Demo") as demo:
|
|
| 66 |
neg = gr.Textbox(label="Negative Prompt", value="wheels")
|
| 67 |
|
| 68 |
with gr.Row():
|
|
|
|
| 69 |
guidance = gr.Slider(0, 5, step=0.1, label="Guidance Scale", value=3.0)
|
| 70 |
bias = gr.Slider(0, 0.5, step=0.01, label="Bias", value=0.1)
|
| 71 |
-
step = gr.Slider(
|
| 72 |
seed = gr.Number(label="Seed", value=0, precision=0)
|
| 73 |
|
| 74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
|
| 76 |
btn = gr.Button("Generate")
|
| 77 |
-
btn.click(fn=generate_video, inputs=[pos, neg, guidance, bias, step, seed], outputs=
|
| 78 |
|
| 79 |
-
demo.launch()
|
|
|
|
| 27 |
torch_dtype=torch.bfloat16,
|
| 28 |
hf_token=os.environ.get("HF_TOKEN", None)
|
| 29 |
)
|
| 30 |
+
|
| 31 |
+
from nag import NAGStableDiffusion3Pipeline
|
| 32 |
+
nag_pipe = NAGStableDiffusion3Pipeline.from_pretrained(
|
| 33 |
+
model_id,
|
| 34 |
+
torch_dtype=torch.bfloat16,
|
| 35 |
+
token="hf_token",
|
| 36 |
+
)
|
| 37 |
+
pipe = pipe.to("cuda")
|
| 38 |
+
nag_pipe = nag_pipe.to("cuda")
|
| 39 |
import os
|
| 40 |
@spaces.GPU
|
| 41 |
+
def generate_video(positive_prompt, negative_prompt, guidance_scale, bias, step, seed, nag_guidance, nag_alpha, nag_tau, nag_step, progress=gr.Progress(track_tqdm=False)):
|
| 42 |
+
global pipe, nag_pipe
|
| 43 |
lambda total: progress.tqdm(range(total))
|
| 44 |
+
|
| 45 |
print(f"Generating image with params: {positive_prompt}, {negative_prompt}, {guidance_scale}, {bias}, {step}")
|
| 46 |
|
| 47 |
output = pipe(
|
|
|
|
| 55 |
path = f"images/{uuid.uuid4().hex}.png"
|
| 56 |
output.save(path)
|
| 57 |
output_path = path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
print(f"Image saved to {output_path}")
|
|
|
|
| 59 |
|
| 60 |
+
output_nag = nag_pipe(
|
| 61 |
+
prompt=positive_prompt,
|
| 62 |
+
negative_prompt=negative_prompt,
|
| 63 |
+
num_inference_steps=nag_step,
|
| 64 |
+
nag_scale=nag_guidance,
|
| 65 |
+
nag_alpha=nag_alpha,
|
| 66 |
+
nag_tau=nag_tau,
|
| 67 |
+
guidance_scale=0.0,
|
| 68 |
+
).images[0]
|
| 69 |
+
nag_path = f"images/{uuid.uuid4().hex}_nag.png"
|
| 70 |
+
output_nag.save(nag_path)
|
| 71 |
+
print(f"NAG Image saved to {nag_path}")
|
| 72 |
+
|
| 73 |
+
return output_path, nag_path
|
| 74 |
|
| 75 |
with gr.Blocks(title="Value Sign Flip Wan 2.1 Demo") as demo:
|
| 76 |
+
gr.Markdown("# Value Sign Flip Wan 2.1 Demo \n\n This demo is based on SD3.5-L-Turbo model and uses Value Sign Flip technique to generate videos with different guidance scales and biases. More on [GitHub](https://github.com/weathon/VSF/blob/main/wan.md)\n\nPositive prompt should be at least 1 sentence long or the results will be weird.")
|
| 77 |
# gr.Markdown("# Value Sign Flip Wan 2.1 Demo \n\n This demo is based on Wan 2.1 T2V model and uses Value Sign Flip technique to generate videos with different guidance scales and biases. More on [GitHub](https://github.com/weathon/VSF/blob/main/wan.md)\n\nPositive prompt should be at least 2 sentence long or the results will be weird.")
|
| 78 |
|
| 79 |
with gr.Row():
|
|
|
|
| 81 |
neg = gr.Textbox(label="Negative Prompt", value="wheels")
|
| 82 |
|
| 83 |
with gr.Row():
|
| 84 |
+
gr.Markdown("## VSF Generation Parameters")
|
| 85 |
guidance = gr.Slider(0, 5, step=0.1, label="Guidance Scale", value=3.0)
|
| 86 |
bias = gr.Slider(0, 0.5, step=0.01, label="Bias", value=0.1)
|
| 87 |
+
step = gr.Slider(4, 15, step=1, label="Step", value=8)
|
| 88 |
seed = gr.Number(label="Seed", value=0, precision=0)
|
| 89 |
|
| 90 |
+
with gr.Row():
|
| 91 |
+
gr.Markdown("## NAG Generation Parameters")
|
| 92 |
+
nag_guidance = gr.Slider(1, 10, step=0.1, label="Guidance Scale", value=5)
|
| 93 |
+
nag_alpha = gr.Slider(0.1, 1.0, step=0.01, label="Alpha", value=0.25)
|
| 94 |
+
nag_tau = gr.Slider(1, 10, step=0.01, label="Tau", value=3.0)
|
| 95 |
+
nag_step = gr.Slider(4, 15, step=1, label="Step", value=8)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
with gr.Row():
|
| 100 |
+
vsf_out = gr.Image(label="VSF Generated Image")
|
| 101 |
+
nag_out = gr.Image(label="NAG Generated Image")
|
| 102 |
|
| 103 |
btn = gr.Button("Generate")
|
| 104 |
+
btn.click(fn=generate_video, inputs=[pos, neg, guidance, bias, step, seed, nag_guidance, nag_alpha, nag_tau, nag_step], outputs=[vsf_out, nag_out])
|
| 105 |
|
| 106 |
+
demo.launch(share=True)
|