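"""Gradio demo: a football logo generator built on Stable Diffusion 1.5 with
optional LoRA adapters, ControlNet conditioning (Canny edges or HED scribble),
and IP-Adapter image prompts."""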
import gradio as gr
import numpy as np
import torch
from diffusers.utils import load_image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from peft import PeftModel, LoraConfig
from controlnet_aux import HEDdetector
from PIL import Image
import cv2 as cv
import os
from functools import lru_cache
from contextlib import contextmanager
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
IP_ADAPTER = 'h94/IP-Adapter'
IP_ADAPTER_WEIGHT_NAME = "ip-adapter-plus_sd15.bin"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_id_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
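# Note: float16 roughly halves GPU memory use; CPU execution keeps float32,
# where half-precision kernels are poorly supported.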

class PipelineManager:
    def __init__(self):
        self.pipe = None
        self.current_model = None
        self.controlnet_cache = {}
        self.hed = None

    def get_controlnet(self, model_name: str) -> ControlNetModel:
        if model_name not in self.controlnet_cache:
            self.controlnet_cache[model_name] = ControlNetModel.from_pretrained(
                model_name,
                cache_dir="./models_cache",
                torch_dtype=torch_dtype
            ).to(device)
        return self.controlnet_cache[model_name]

    def get_hed_detector(self):
        if self.hed is None:
            self.hed = HEDdetector.from_pretrained('lllyasviel/Annotators')
        return self.hed
    def initialize_pipeline(self, model_id, controlnet_model):
        controlnet = self.get_controlnet(controlnet_model)
        if not self.pipe or model_id != self.current_model:
            self.pipe = self.create_pipeline(model_id, controlnet)
            self.current_model = model_id
        else:
            # Reusing the cached pipeline: swap in the requested ControlNet,
            # otherwise switching modes in the UI would keep the old one.
            self.pipe.controlnet = controlnet
        return self.pipe
    def create_pipeline(self, model_id, controlnet):
        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            model_id,
            torch_dtype=torch_dtype,
            controlnet=controlnet,
            cache_dir="./models_cache"
        ).to(device)
        if os.path.exists('./lora_logos'):
            pipe = self.load_lora_adapters(pipe)
        return pipe

    def load_lora_adapters(self, pipe):
        unet_dir = os.path.join('./lora_logos', "unet")
        text_encoder_dir = os.path.join('./lora_logos', "text_encoder")
        pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_dir, adapter_name="default")
        if os.path.exists(text_encoder_dir):
            pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_dir)
        return pipe.to(device)
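
# A minimal sketch of driving PipelineManager directly (the Gradio handler
# below does this for real; model names come from controlnet_models):
#
#   mgr = PipelineManager()
#   pipe = mgr.initialize_pipeline(model_id_default, "lllyasviel/sd-controlnet-canny")
#   pipe = mgr.initialize_pipeline(model_id_default, "lllyasviel/sd-controlnet-scribble")
#   # the second call reuses the cached pipeline and only swaps the ControlNet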

@contextmanager
def torch_inference_mode():
    # Combines inference_mode (no autograd bookkeeping) with autocast for the
    # active device. The @contextmanager decorator is required for the bare
    # yield below to work as a `with` block.
    with torch.inference_mode(), torch.autocast(device.type):
        yield

def process_embeddings(prompt, negative_prompt, tokenizer, text_encoder):
    def process_text(text):
        tokens = tokenizer(text, return_tensors="pt", truncation=False).input_ids
        chunks = [tokens[:, i:i + 77].to(device) for i in range(0, tokens.size(1), 77)]
        return torch.cat([text_encoder(chunk)[0] for chunk in chunks], dim=1)

    prompt_emb = process_text(prompt)
    negative_emb = process_text(negative_prompt)
    max_len = max(prompt_emb.size(1), negative_emb.size(1))
    return (
        torch.nn.functional.pad(prompt_emb, (0, 0, 0, max_len - prompt_emb.size(1))),
        torch.nn.functional.pad(negative_emb, (0, 0, 0, max_len - negative_emb.size(1)))
    )
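
# process_embeddings sidesteps CLIP's 77-token context limit: the prompt is
# tokenized without truncation, split into 77-token chunks, each chunk encoded
# separately, and the chunk embeddings concatenated along the sequence axis.
# Prompt and negative embeddings are then zero-padded to a common length, as
# classifier-free guidance requires matching shapes.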

def process_control_image(image_path: str, processor: str, hed_detector) -> Image.Image:
    image = load_image(image_path).convert('RGB')
    if processor == 'edge_detection':
        edges = cv.Canny(np.array(image), 80, 160)
        # Replicate the single-channel edge map to 3 channels for the pipeline.
        return Image.fromarray(np.repeat(edges[:, :, None], 3, axis=2))
    if processor == 'scribble':
        scribble = hed_detector(image)
        # Denoise the HED map and boost its contrast.
        processed = cv.medianBlur(np.array(scribble), 3)
        return Image.fromarray(cv.convertScaleAbs(processed, alpha=1.5))
    raise ValueError(f"Unknown control image processor: {processor!r}")

pipeline_mgr = PipelineManager()

controlnet_models = {
    "edge_detection": "lllyasviel/sd-controlnet-canny",
    "scribble": "lllyasviel/sd-controlnet-scribble"
}
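
# Example (hypothetical path): turning a reference image into a Canny edge map
# for the "edge_detection" ControlNet:
#
#   control = process_control_image("crest.png", "edge_detection",
#                                   pipeline_mgr.get_hed_detector())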

def infer(
    prompt,
    negative_prompt,
    width=512,
    height=512,
    num_inference_steps=20,
    model_id='stable-diffusion-v1-5/stable-diffusion-v1-5',
    seed=42,
    guidance_scale=7.0,
    lora_scale=0.5,
    cn_enable=False,
    cn_strength=0.0,
    cn_mode='edge_detection',
    cn_image=None,
    ip_enable=False,
    ip_scale=0.5,
    ip_image=None,
    progress=gr.Progress(track_tqdm=True)
):
    # gr.Number delivers a float; torch.Generator.manual_seed needs an int.
    generator = torch.Generator(device).manual_seed(int(seed))
    with torch_inference_mode():
        pipe = pipeline_mgr.initialize_pipeline(
            model_id,
            controlnet_models.get(cn_mode, controlnet_models['edge_detection'])
        )
        if cn_enable and not cn_image:
            raise gr.Error("ControlNet enabled but no image provided!")
        if ip_enable and not ip_image:
            raise gr.Error("IP-Adapter enabled but no image provided!")
        prompt_emb, negative_emb = process_embeddings(
            prompt,
            negative_prompt,
            pipe.tokenizer,
            pipe.text_encoder
        )
        params = {
            'prompt_embeds': prompt_emb,
            'negative_prompt_embeds': negative_emb,
            'guidance_scale': guidance_scale,
            'num_inference_steps': num_inference_steps,
            'width': width,
            'height': height,
            'generator': generator,
            'cross_attention_kwargs': {"scale": lora_scale},
        }
        if cn_enable:
            params['image'] = process_control_image(
                cn_image,
                cn_mode,
                pipeline_mgr.get_hed_detector()
            )
            params['controlnet_conditioning_scale'] = float(cn_strength)
        else:
            # Dummy conditioning image at zero strength so the ControlNet
            # pipeline still runs; sized to the requested output.
            params['image'] = torch.zeros((1, 3, height, width)).to(device)
            params['controlnet_conditioning_scale'] = 0.0
        if ip_enable:
            pipe.load_ip_adapter(IP_ADAPTER, subfolder="models", weight_name=IP_ADAPTER_WEIGHT_NAME)
            params['ip_adapter_image'] = load_image(ip_image).convert('RGB')
            pipe.set_ip_adapter_scale(ip_scale)
        pipe.fuse_lora(lora_scale=lora_scale)
        return pipe(**params).images[0]
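
# Programmatic use without the UI, e.g. in a notebook (hypothetical prompt;
# returns a PIL image):
#
#   img = infer("minimalist football club crest, flat vector style",
#               "blurry, watermark, text", seed=0)
#   img.save("logo.png")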

css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# ⚽️ Football Logo Generator")
        with gr.Row():
            model_id = gr.Textbox(
                label="Model ID",
                max_lines=1,
                placeholder="Enter model id like 'stable-diffusion-v1-5/stable-diffusion-v1-5'",
                value=model_id_default
            )
        prompt = gr.Textbox(
            label="Prompt",
            max_lines=1,
            placeholder="Enter your prompt",
        )
        negative_prompt = gr.Textbox(
            label="Negative prompt",
            max_lines=1,
            placeholder="Enter a negative prompt",
        )
        with gr.Row():
            seed = gr.Number(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=42,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance scale",
                minimum=0.0,
                maximum=10.0,
                step=0.1,
                value=7.0,
            )
        with gr.Row():
            lora_scale = gr.Slider(
                label="LoRA scale",
                minimum=0.0,
                maximum=1.0,
                step=0.1,
                value=0.5,
            )
        with gr.Row():
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=20,
            )
        # ControlNet section
        cn_enable = gr.Checkbox(label="Enable ControlNet")
        with gr.Column(visible=False) as cn_options:
            with gr.Row():
                cn_strength = gr.Slider(0, 2, value=0.8, step=0.1, label="Control strength", interactive=True)
                cn_mode = gr.Dropdown(
                    choices=["edge_detection", "scribble"],
                    value="edge_detection",
                    label="Control mode",
                    interactive=True,
                )
            cn_image = gr.Image(type="filepath", label="Control image")
        cn_enable.change(
            lambda x: gr.update(visible=x),
            inputs=cn_enable,
            outputs=cn_options
        )
        # IP-Adapter section
        ip_enable = gr.Checkbox(label="Enable IP-Adapter")
        with gr.Column(visible=False) as ip_options:
            ip_scale = gr.Slider(0, 1, value=0.5, step=0.1, label="IP-adapter scale", interactive=True)
            ip_image = gr.Image(type="filepath", label="IP-adapter image", interactive=True)
        ip_enable.change(
            lambda x: gr.update(visible=x),
            inputs=ip_enable,
            outputs=ip_options
        )
        with gr.Accordion("Optional Settings", open=False):
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            with gr.Row():
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
        run_button = gr.Button("Run", scale=1, variant="primary")
        result = gr.Image(label="Result", show_label=False)

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            width,
            height,
            num_inference_steps,
            model_id,
            seed,
            guidance_scale,
            lora_scale,
            cn_enable,
            cn_strength,
            cn_mode,
            cn_image,
            ip_enable,
            ip_scale,
            ip_image
        ],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()