dream2589632147 committed
Commit 70fc441 · verified · 1 Parent(s): 74da07c

Update app.py

Files changed (1)
  1. app.py +27 -22
app.py CHANGED
@@ -1,5 +1,6 @@
  import spaces
  import torch
+ import os  # ← added
  from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
  from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
  from diffusers.utils.export_utils import export_to_video
@@ -12,13 +13,12 @@ import gc
 
  from torchao.quantization import quantize_
  from torchao.quantization import Float8DynamicActivationFloat8WeightConfig, Int8WeightOnlyConfig
-
  import aoti
 
  # =========================================================
  # MODEL CONFIGURATION
  # =========================================================
- MODEL_ID = "dream2589632147/Dream-wan2-2-faster-Pro"  # ← new model path
+ MODEL_ID = "dream2589632147/Dream-wan2-2-faster-Pro"
 
  MAX_DIM = 832
  MIN_DIM = 480
@@ -26,32 +26,40 @@ SQUARE_DIM = 640
  MULTIPLE_OF = 16
 
  MAX_SEED = np.iinfo(np.int32).max
-
  FIXED_FPS = 16
  MIN_FRAMES_MODEL = 8
- MAX_FRAMES_MODEL = 80
-
+ MAX_FRAMES_MODEL = 720
  MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)
  MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)
 
+ # =========================================================
+ # LOAD TRANSFORMERS (using HF_TOKEN)
+ # =========================================================
+ transformer = WanTransformer3DModel.from_pretrained(
+     MODEL_ID,
+     subfolder="transformer",
+     torch_dtype=torch.bfloat16,
+     device_map="cuda",
+     token=os.environ.get("HF_TOKEN")  # ← uses the token to access the model
+ )
+
+ transformer_2 = WanTransformer3DModel.from_pretrained(
+     MODEL_ID,
+     subfolder="transformer_2",
+     torch_dtype=torch.bfloat16,
+     device_map="cuda",
+     token=os.environ.get("HF_TOKEN")
+ )
+
  # =========================================================
  # LOAD PIPELINE
  # =========================================================
  pipe = WanImageToVideoPipeline.from_pretrained(
      MODEL_ID,
-     transformer=WanTransformer3DModel.from_pretrained(
-         MODEL_ID,
-         subfolder="transformer",
-         torch_dtype=torch.bfloat16,
-         device_map="cuda",
-     ),
-     transformer_2=WanTransformer3DModel.from_pretrained(
-         MODEL_ID,
-         subfolder="transformer_2",
-         torch_dtype=torch.bfloat16,
-         device_map="cuda",
-     ),
+     transformer=transformer,
+     transformer_2=transformer_2,
      torch_dtype=torch.bfloat16,
+     token=os.environ.get("HF_TOKEN")
  ).to("cuda")
 
  # =========================================================
@@ -105,7 +113,6 @@ def resize_image(image: Image.Image) -> Image.Image:
      aspect_ratio = width / height
      MAX_ASPECT_RATIO = MAX_DIM / MIN_DIM
      MIN_ASPECT_RATIO = MIN_DIM / MAX_DIM
-
      image_to_resize = image
 
      if aspect_ratio > MAX_ASPECT_RATIO:
@@ -126,14 +133,12 @@
 
      final_w = round(target_w / MULTIPLE_OF) * MULTIPLE_OF
      final_h = round(target_h / MULTIPLE_OF) * MULTIPLE_OF
-
      final_w = max(MIN_DIM, min(MAX_DIM, final_w))
      final_h = max(MIN_DIM, min(MAX_DIM, final_h))
-
      return image_to_resize.resize((final_w, final_h), Image.LANCZOS)
 
  # =========================================================
- # UTILITY FUNCTIONS
+ # UTILITIES
  # =========================================================
  def get_num_frames(duration_seconds: float):
      return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))
@@ -152,7 +157,7 @@ def get_duration(
      return 10 + int(steps) * step_duration
 
  # =========================================================
- # MAIN GENERATION FUNCTION
+ # MAIN FUNCTION
  # =========================================================
  @spaces.GPU(duration=get_duration)
  def generate_video(
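
As a sanity check on the frame-limit change in this commit (MAX_FRAMES_MODEL raised from 80 to 720 at a fixed 16 fps), here is a minimal standalone sketch that reproduces the duration arithmetic and the get_num_frames clamp from app.py. It only redoes the math with the same constants; it does not load the model.

import numpy as np

FIXED_FPS = 16
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 720  # was 80 before this commit

# Same derivation as app.py: the duration limits follow from the frame limits.
MIN_DURATION = round(MIN_FRAMES_MODEL / FIXED_FPS, 1)  # 0.5 s, unchanged
MAX_DURATION = round(MAX_FRAMES_MODEL / FIXED_FPS, 1)  # 45.0 s, up from 5.0 s

def get_num_frames(duration_seconds: float) -> int:
    # Mirrors the utility in app.py: clamp the requested frame count to the model range, then add one.
    return 1 + int(np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

print(MIN_DURATION, MAX_DURATION)  # 0.5 45.0
print(get_num_frames(45.0))        # 721 frames at the new cap (previously 81)

Note that the new token=os.environ.get("HF_TOKEN") arguments assume an HF_TOKEN secret is configured for the Space; if it is not set, os.environ.get returns None and downloading a private or gated MODEL_ID will likely fail.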