weiyuchoumou526 committed on
Commit
35b9a3d
·
1 Parent(s): c691e3c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -15
app.py CHANGED
@@ -36,6 +36,16 @@ def filter_kwargs(cls, kwargs):
36
  valid_params = set(sig.parameters.keys()) - {'self', 'cls'}
37
  return {k: v for k, v in kwargs.items() if k in valid_params}
38
 
 
 
 
 
 
 
 
 
 
 
39
  # pretrained_model_path = "./models/Diffusion_Transformer/Wan2.1-Fun-1.3B-InP"
40
  pretrained_model_path = "alibaba-pai/Wan2.1-Fun-1.3B-InP"
41
  transformer_path = "Kunbyte/ROSE"
@@ -43,12 +53,27 @@ transformer_path = "Kunbyte/ROSE"
43
  config_path = "./configs/wan2.1/wan_civitai.yaml"
44
  config = OmegaConf.load(config_path)
45
 
46
- text_encoder_subpath = config['text_encoder_kwargs'].get('text_encoder_subpath', 'text_encoder')
 
 
 
 
 
 
 
 
47
  text_encoder = WanT5EncoderModel.from_pretrained(
48
- pretrained_model_path,
49
- subfolder=text_encoder_subpath,
50
  additional_kwargs=OmegaConf.to_container(config['text_encoder_kwargs']),
51
  low_cpu_mem_usage=True,
 
 
 
 
 
 
 
 
52
  )
53
 
54
  transformer_subpath = config['transformer_additional_kwargs'].get('transformer_subpath', 'transformer')
@@ -58,22 +83,11 @@ transformer3d = WanTransformer3DModel.from_pretrained(
58
  transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']),
59
  )
60
 
61
- image_encoder_subpath = config['image_encoder_kwargs'].get('image_encoder_subpath', 'image_encoder')
62
- clip_image_encoder = CLIPModel.from_pretrained(pretrained_model_path, subfolder=image_encoder_subpath)
63
-
64
- vae_subpath = config['vae_kwargs'].get('vae_subpath', 'vae')
65
- vae = AutoencoderKLWan.from_pretrained(
66
- pretrained_model_path,
67
- subfolder=vae_subpath,
68
- additional_kwargs=OmegaConf.to_container(config['vae_kwargs']),
69
- )
70
-
71
  noise_scheduler = FlowMatchEulerDiscreteScheduler(
72
  **filter_kwargs(FlowMatchEulerDiscreteScheduler, OmegaConf.to_container(config['scheduler_kwargs']))
73
  )
74
 
75
- tokenizer_subpath = config['text_encoder_kwargs'].get('tokenizer_subpath', 'tokenizer')
76
- tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path, tokenizer_subpath)
77
  # tokenizer = AutoTokenizer.from_pretrained(
78
  # os.path.join(pretrained_model_path, config['text_encoder_kwargs'].get('tokenizer_subpath', 'tokenizer')),
79
  # )
 
36
  valid_params = set(sig.parameters.keys()) - {'self', 'cls'}
37
  return {k: v for k, v in kwargs.items() if k in valid_params}
38
 
39
+ from huggingface_hub import snapshot_download
40
+
41
def download_component_subfolder(repo_id, subfolder):
    """Download one component subfolder of a Hub model repo and return its local path.

    Only files matching ``{subfolder}/*`` are fetched from the model
    repository (huggingface_hub caches the snapshot locally); the
    returned value is the path of that subfolder inside the snapshot
    directory, suitable for ``from_pretrained``-style loaders.
    """
    pattern = f"{subfolder}/*"
    snapshot_root = snapshot_download(
        repo_id=repo_id,
        repo_type="model",
        allow_patterns=[pattern],
    )
    return os.path.join(snapshot_root, subfolder)
48
+
49
  # pretrained_model_path = "./models/Diffusion_Transformer/Wan2.1-Fun-1.3B-InP"
50
  pretrained_model_path = "alibaba-pai/Wan2.1-Fun-1.3B-InP"
51
  transformer_path = "Kunbyte/ROSE"
 
53
  config_path = "./configs/wan2.1/wan_civitai.yaml"
54
  config = OmegaConf.load(config_path)
55
 
56
+ repo_id = "alibaba-pai/Wan2.1-Fun-1.3B-InP"
57
+
58
+ text_encoder_path = download_component_subfolder(repo_id, config['text_encoder_kwargs'].get('text_encoder_subpath', 'text_encoder'))
59
+ tokenizer_path = download_component_subfolder(repo_id, config['text_encoder_kwargs'].get('tokenizer_subpath', 'tokenizer'))
60
+ image_encoder_path = download_component_subfolder(repo_id, config['image_encoder_kwargs'].get('image_encoder_subpath', 'image_encoder'))
61
+ vae_path = download_component_subfolder(repo_id, config['vae_kwargs'].get('vae_subpath', 'vae'))
62
+
63
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
64
+
65
  text_encoder = WanT5EncoderModel.from_pretrained(
66
+ text_encoder_path,
 
67
  additional_kwargs=OmegaConf.to_container(config['text_encoder_kwargs']),
68
  low_cpu_mem_usage=True,
69
+ torch_dtype=torch.bfloat16
70
+ )
71
+
72
+ clip_image_encoder = CLIPModel.from_pretrained(image_encoder_path)
73
+
74
+ vae = AutoencoderKLWan.from_pretrained(
75
+ vae_path,
76
+ additional_kwargs=OmegaConf.to_container(config['vae_kwargs']),
77
  )
78
 
79
  transformer_subpath = config['transformer_additional_kwargs'].get('transformer_subpath', 'transformer')
 
83
  transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']),
84
  )
85
 
 
 
 
 
 
 
 
 
 
 
86
  noise_scheduler = FlowMatchEulerDiscreteScheduler(
87
  **filter_kwargs(FlowMatchEulerDiscreteScheduler, OmegaConf.to_container(config['scheduler_kwargs']))
88
  )
89
 
90
+
 
91
  # tokenizer = AutoTokenizer.from_pretrained(
92
  # os.path.join(pretrained_model_path, config['text_encoder_kwargs'].get('tokenizer_subpath', 'tokenizer')),
93
  # )