Yaron Koresh
committed on
Update app.py
app.py CHANGED
@@ -30,15 +30,15 @@ height=896
 device = "cuda"
 dtype = torch.float16
 result=[]
-step =
-accu=
+step = 30
+accu=7.5
 repo = "ByteDance/AnimateDiff-Lightning"
 ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
-base = "emilianJR/epiCRealism"
-
-
+#base = "emilianJR/epiCRealism"
+base = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
+vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
 #unet = UNet2DConditionModel.from_config("emilianJR/epiCRealism",subfolder="unet").to(device, dtype).load_state_dict(load_file(hf_hub_download("emilianJR/epiCRealism", "unet/diffusion_pytorch_model.safetensors"), device=device), strict=False)
-
+adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype, device=device)
 
 css="""
 input, input::placeholder {
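For context: repo and ckpt name the distilled AnimateDiff-Lightning UNet weights, but this hunk never loads them, so the load presumably happens elsewhere in app.py. A minimal sketch of the usual loading pattern for these checkpoints, assuming the standard huggingface_hub and safetensors APIs (the placement inside this app is a guess):

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Assumption: after the pipeline exists, the distilled Lightning UNet
# weights are merged into pipe.unet; strict=False because the checkpoint
# only covers the UNet modules that were trained.
pipe.unet.load_state_dict(
    load_file(hf_hub_download(repo, ckpt), device=device),
    strict=False,
)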
@@ -141,6 +141,7 @@ def Piper(image,positive,negative,motion):
     pipe.unload_lora_weights()
     if motion != "":
         pipe.load_lora_weights(motion, adapter_name="motion")
+        pipe.fuse_lora()
         pipe.set_adapters(["motion"], [0.7])
         last_motion = motion
 
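The new pipe.fuse_lora() call bakes the just-loaded motion LoRA into the base weights for faster inference. A sketch of the diffusers adapter lifecycle this hunk leans on, shown in the canonical ordering (activate, then fuse); the unfuse step is an assumption about what a later motion swap would need:

pipe.unload_lora_weights()                             # drop any previously loaded LoRA
pipe.load_lora_weights(motion, adapter_name="motion")  # e.g. an AnimateDiff motion-LoRA repo
pipe.set_adapters(["motion"], adapter_weights=[0.7])   # activate at 0.7 strength
pipe.fuse_lora()                                       # merge adapter into base weights
# ...generate frames...
# pipe.unfuse_lora()  # assumption: required before fusing a different motion LoRA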
@@ -193,7 +194,7 @@ def run(i,m,p1,p2,*result):
 
     return out
 
-pipe = AnimateDiffPipeline.from_pretrained(base, torch_dtype=dtype).to(device)
+pipe = AnimateDiffPipeline.from_pretrained(base, vae=vae, motion_adapter=adapter, torch_dtype=dtype).to(device)
 pipe.scheduler = DDIMScheduler(
     clip_sample=False,
     beta_start=0.00085,
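Taken together, the new wiring passes the ft-mse VAE and the v1-5-3 motion adapter into AnimateDiffPipeline explicitly. An end-to-end sketch of how these pieces combine at generation time; the prompt, the gif export, and the scheduler fields below beta_start (which the diff cuts off) are illustrative assumptions, and step/accu are read as num_inference_steps and guidance_scale:

import torch
from diffusers import AnimateDiffPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

device, dtype = "cuda", torch.float16
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device, dtype=dtype)
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=dtype)
pipe = AnimateDiffPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V6.0_B1_noVAE",
    vae=vae, motion_adapter=adapter, torch_dtype=dtype,
).to(device)
pipe.scheduler = DDIMScheduler(
    clip_sample=False,
    beta_start=0.00085,
    beta_end=0.012,          # assumption: standard SD1.5 betas; the diff stops at beta_start
    beta_schedule="linear",  # assumption: the usual AnimateDiff DDIM configuration
)

frames = pipe(
    prompt="a corgi running on the beach",  # illustrative prompt, not from the app
    num_inference_steps=30,                 # step
    guidance_scale=7.5,                     # accu
).frames[0]
export_to_gif(frames, "out.gif")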