Upload DeepShrinkHires2.0.fix using SD-Hub
Browse files
DeepShrinkHires2.0.fix/.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__
|
| 2 |
+
settings.json
|
DeepShrinkHires2.0.fix/scripts/DeepShrinkHires.fix.py
ADDED
|
@@ -0,0 +1,706 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deep Shrink Hires.fix (RU++ v2.3.1 LTS + Experimental, FIXED)
|
| 2 |
+
# Исправления: статические _block_scale/_curve_weight; синхронизация instance→class после process().
|
| 3 |
+
|
| 4 |
+
import json
import math
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

import gradio as gr
import torch
import torch.nn.functional as F

import modules.devices as devices
import modules.scripts as scripts
import modules.script_callbacks as script_callbacks
import modules.sd_unet as sd_unet
import modules.shared as shared

from ldm.modules.diffusionmodules.openaimodel import Upsample, Downsample, ResBlock
from ldm.modules.diffusionmodules.util import timestep_embedding
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# -------------------------- Утилиты --------------------------
|
| 23 |
+
|
| 24 |
+
def _to_scalar(x) -> float:
|
| 25 |
+
if isinstance(x, torch.Tensor):
|
| 26 |
+
return float(x.item())
|
| 27 |
+
try:
|
| 28 |
+
return float(x)
|
| 29 |
+
except Exception:
|
| 30 |
+
return 0.0
|
| 31 |
+
|
| 32 |
+
def _clamp(v: float, lo: float, hi: float) -> float:
|
| 33 |
+
return max(lo, min(hi, v))
|
| 34 |
+
|
| 35 |
+
def _get_or_last(seq: List[float], index: int, default: float) -> float:
|
| 36 |
+
if not seq:
|
| 37 |
+
return default
|
| 38 |
+
return seq[index] if index < len(seq) else seq[-1]
|
| 39 |
+
|
| 40 |
+
def _safe_size(h: torch.Tensor, scale_factor: float, min_feat: int) -> Tuple[int, int]:
|
| 41 |
+
hi, wi = h.shape[-2], h.shape[-1]
|
| 42 |
+
ho = max(2, int(round(hi * scale_factor)))
|
| 43 |
+
wo = max(2, int(round(wi * scale_factor)))
|
| 44 |
+
if ho < min_feat or wo < min_feat:
|
| 45 |
+
return hi, wi
|
| 46 |
+
return ho, wo
|
| 47 |
+
|
| 48 |
+
def _interpolate(img: torch.Tensor, size: Tuple[int, int], mode: str, antialias: bool) -> torch.Tensor:
|
| 49 |
+
if size == img.shape[-2:]:
|
| 50 |
+
return img
|
| 51 |
+
dtype = img.dtype
|
| 52 |
+
try:
|
| 53 |
+
out = F.interpolate(
|
| 54 |
+
img.float(),
|
| 55 |
+
size=size,
|
| 56 |
+
mode=mode,
|
| 57 |
+
align_corners=False if mode in ("bilinear", "bicubic") else None,
|
| 58 |
+
antialias=(antialias if mode in ("bilinear", "bicubic") else False),
|
| 59 |
+
)
|
| 60 |
+
except TypeError:
|
| 61 |
+
out = F.interpolate(
|
| 62 |
+
img.float(),
|
| 63 |
+
size=size,
|
| 64 |
+
mode=mode,
|
| 65 |
+
align_corners=False if mode in ("bilinear", "bicubic") else None,
|
| 66 |
+
)
|
| 67 |
+
return out.to(dtype)
|
| 68 |
+
|
| 69 |
+
def _resize(h: torch.Tensor, scale: float, mode: str, antialias: bool, min_feat: int) -> torch.Tensor:
    """Rescale *h* by *scale* (subject to the _safe_size floor); no-op when
    scale is exactly 1.0."""
    if scale == 1.0:
        return h
    target = _safe_size(h, scale, min_feat)
    return _interpolate(h, target, mode, antialias)
|
| 74 |
+
|
| 75 |
+
def _parse_number_list(text: str, as_int: bool = False) -> List[float]:
|
| 76 |
+
text = (text or "").replace("\n", " ")
|
| 77 |
+
vals: List[float] = []
|
| 78 |
+
for chunk in text.split(";"):
|
| 79 |
+
s = chunk.strip()
|
| 80 |
+
if not s:
|
| 81 |
+
continue
|
| 82 |
+
if "/" in s:
|
| 83 |
+
a, b = s.split("/", 1)
|
| 84 |
+
v = float(a.strip()) / float(b.strip())
|
| 85 |
+
else:
|
| 86 |
+
v = float(s)
|
| 87 |
+
vals.append(v)
|
| 88 |
+
if not vals:
|
| 89 |
+
raise ValueError("Список пуст.")
|
| 90 |
+
return [int(round(v)) for v in vals] if as_int else vals
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# -------------------------- Данные --------------------------
|
| 94 |
+
|
| 95 |
+
@dataclass
class DSHFAction:
    """One threshold rule for the deep-shrink pass.

    Used by DSHF._block_scale: a rule fires when it is enabled, the block
    depth matches, and the current timestep is >= ``timestep``.
    """
    # Whether this rule participates in matching.
    enable: bool
    # Timestep threshold; the rule matches while current timestep >= this.
    timestep: float
    # U-Net block depth this rule targets.
    depth: int
    # Spatial scale factor applied when the rule matches.
    scale: float
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# -------------------------- Скрипт --------------------------
|
| 104 |
+
|
| 105 |
+
class DSHF(scripts.Script):
    """Deep Shrink Hires.fix script (RU++ v2.3.1).

    Configuration lives on *class* attributes because the wrapped U-Net
    modules read them as ``DSHF.*``; process() syncs instance-staged values
    onto the class after parsing the UI.
    """

    # Counters for the current pass (class attributes: the U-Net wrappers need them)
    currentBlock: int = 0
    currentConv: int = 0
    currentTimestep: float = 1000.0

    # Global parameters (class attributes: the U-Net reads them as DSHF.*)
    enabled: bool = True
    interp_method: str = "bicubic"
    interp_antialias: bool = True
    channels_last: bool = False
    min_feature_size: int = 8

    # Global scale curve
    curve_enable: bool = False
    curve_type: str = "linear"
    curve_t_start: float = 800.0
    curve_t_end: float = 200.0
    curve_scale_start: float = 1.0
    curve_scale_end: float = 1.0
    curve_alpha: float = 0.5
    auto_end_enable: bool = False
    auto_end_strength: float = 0.35

    # Threshold rules.
    # NOTE(review): mutable class-level list, shared across instances by
    # design — process() replaces it wholesale rather than mutating it.
    actions: List[DSHFAction] = []

    # Experimental settings (class attributes, read from the U-Net wrappers)
    exp_section_enable: bool = False
    exp_enable: bool = False
    exp_timestep: float = 625.0
    exp_scales: List[float] = [1.0]
    exp_dilations: List[int] = [1]
    exp_in_muls: List[float] = [1.0]
    exp_out_muls: List[float] = [1.0]
    exp_cfg_muls: List[float] = [1.0]
|
| 141 |
+
|
| 142 |
+
# ---------------- UI ----------------
|
| 143 |
+
|
| 144 |
+
def title(self):
    """Name displayed in the WebUI script list."""
    return "Deep Shrink Hires.fix (RU++ v2.3.1)"
|
| 145 |
+
|
| 146 |
+
def show(self, is_img2img):
    """Always visible in both txt2img and img2img; the extension is gated by
    its own enable checkbox instead."""
    return scripts.AlwaysVisible
|
| 147 |
+
|
| 148 |
+
def ui(self, is_img2img):
    """Build the Gradio UI and return the flat component list.

    IMPORTANT: the order of ``flat`` must exactly match the order in which
    process() consumes values via its ``nxt()`` iterator.
    """
    with gr.Tabs():
        with gr.TabItem("Настройки"):
            Enable_Ext = gr.Checkbox(value=True, label="Включить расширение")

            # Up to 8 threshold rules; only the first Rule_Count are applied.
            with gr.Accordion("Пороги (до 8 правил)", open=True):
                Rule_Count = gr.Slider(1, 8, value=2, step=1, label="Сколько правил использовать")
                rules = []
                defaults = [
                    (True, 625, 3, 2.0),
                    (True, 0, 3, 2.0),
                    (False, 900, 3, 2.0),
                    (False, 650, 3, 2.0),
                    (False, 900, 3, 2.0),
                    (False, 650, 3, 2.0),
                    (False, 900, 3, 2.0),
                    (False, 650, 3, 2.0),
                ]
                for i, (en, ts, dp, sc) in enumerate(defaults, start=1):
                    with gr.Row():
                        rules.append((
                            gr.Checkbox(value=en, label=f"Правило {i}"),
                            gr.Number(value=ts, label=f"Timestep {i}"),
                            gr.Number(value=dp, label=f"Глубина блока {i}", precision=0),
                            gr.Number(value=sc, label=f"Масштаб {i}"),
                        ))

            # Global scale curve controls (see DSHF._curve_weight).
            with gr.Accordion("Глобальная кривая масштаба", open=False):
                Curve_Enable = gr.Checkbox(value=False, label="Включить кривую")
                Curve_Type = gr.Dropdown(choices=["linear", "cosine", "sigmoid"], value="linear", label="Тип")
                with gr.Row():
                    Curve_t_start = gr.Number(value=800, label="t_start")
                    Curve_t_end = gr.Number(value=200, label="t_end")
                with gr.Row():
                    Curve_scale_start = gr.Number(value=1.0, label="scale_start")
                    Curve_scale_end = gr.Number(value=1.0, label="scale_end")
                Curve_alpha = gr.Slider(0.0, 1.0, value=0.5, step=0.05, label="alpha компенсации")
                Min_feature = gr.Slider(2, 64, value=8, step=1, label="Мин. размер фичей")
                with gr.Row():
                    Auto_end_enable = gr.Checkbox(value=False, label="Автоподбор scale_end по целевому разрешению")
                    Auto_end_strength = gr.Slider(0.0, 1.0, value=0.35, step=0.05, label="Сила автоподбора")

            # Runtime/interpolation options.
            with gr.Accordion("Выполнение", open=False):
                Interp_Method = gr.Dropdown(choices=["nearest", "bilinear", "bicubic", "area"], value="bicubic", label="Интерполяция")
                Interp_AA = gr.Checkbox(value=True, label="Антиалиасинг для bilinear/bicubic")
                Channels_Last = gr.Checkbox(value=False, label="Оптимизация channels_last")

            # Experimental per-conv controls; group visibility toggled below.
            with gr.Accordion("Экспериментальные (пер-свёрточные)", open=False):
                Exp_Section_Enable = gr.Checkbox(value=False, label="Включить секцию")
                with gr.Group(visible=False) as ExpGrp:
                    Exp_Enable = gr.Checkbox(value=False, label="Активировать экспериментальное ядро")
                    Exp_Timestep = gr.Number(value=625, label="Пороговой timestep для эксперимента")
                    Exp_Scales = gr.Textbox(value="1", lines=2, label="Масштабы по свёрткам")
                    Exp_Dilations = gr.Textbox(value="1", lines=1, label="Дилатации по свёрткам (целые)")
                    Exp_InMuls = gr.Textbox(value="1", lines=1, label="Входные умножители по блокам")
                    Exp_OutMuls = gr.Textbox(value="1", lines=1, label="Выходные умножители по блокам")
                    Exp_CFGMuls = gr.Textbox(value="1", lines=1, label="CFG-множители по блокам")
                Exp_Section_Enable.change(lambda v: gr.update(visible=bool(v)), Exp_Section_Enable, ExpGrp)

            # Preset save/load; handled inside process().
            with gr.Accordion("Пресеты", open=False):
                Preset_Action = gr.Dropdown(choices=["Сохранить", "Загрузить"], value="Загрузить", label="Действие")
                Preset_Name = gr.Textbox(value="", label="Имя пресета")
                Preset_JSON = gr.Textbox(value="", lines=6, label="Профиль в JSON (для импорта/экспорта)")

        with gr.TabItem("Справка"):
            gr.Markdown("""
**LTS** — безопасное масштабирование только на границах блоков.
**Experimental** — опционально: пер-свёрточные масштабы, дилатации, In/Out и CFG-мультипликаторы.
""")

    # Flatten in the exact order process() expects.
    flat = [Enable_Ext, Rule_Count]
    for row in rules:
        flat += list(row)
    flat += [
        Curve_Enable, Curve_Type, Curve_t_start, Curve_t_end,
        Curve_scale_start, Curve_scale_end, Curve_alpha, Min_feature,
        Auto_end_enable, Auto_end_strength,
        Interp_Method, Interp_AA, Channels_Last,
        Exp_Section_Enable, Exp_Enable, Exp_Timestep,
        Exp_Scales, Exp_Dilations, Exp_InMuls, Exp_OutMuls, Exp_CFGMuls,
        Preset_Action, Preset_Name, Preset_JSON,
    ]
    return flat
|
| 231 |
+
|
| 232 |
+
# ---------------- Исполнение ----------------
|
| 233 |
+
|
| 234 |
+
def _reset_instance_defaults(self):
    """Reset every per-run ``_inst_*`` staging field to its default.

    These fields only stage values parsed from the UI; process() later
    mirrors them onto the DSHF class attributes that the U-Net reads.
    """
    self._inst_actions = []
    per_run_defaults = dict(
        curve_enable=False,
        curve_type="linear",
        curve_t_start=800.0,
        curve_t_end=200.0,
        curve_scale_start=1.0,
        curve_scale_end=1.0,
        curve_alpha=0.5,
        auto_end_enable=False,
        auto_end_strength=0.35,
        interp_method="bicubic",
        interp_antialias=True,
        channels_last=False,
        min_feature_size=8,
        exp_section_enable=False,
        exp_enable=False,
        exp_timestep=625.0,
        exp_scales=[1.0],
        exp_dilations=[1],
        exp_in_muls=[1.0],
        exp_out_muls=[1.0],
        exp_cfg_muls=[1.0],
    )
    # Fresh literals each call, so lists are never shared between runs.
    for field, value in per_run_defaults.items():
        setattr(self, f"_inst_{field}", value)
|
| 258 |
+
|
| 259 |
+
@staticmethod
def _curve_weight(ts: float) -> Optional[float]:
    """Scale factor from the global curve at timestep *ts*, or None when the
    curve is disabled.

    Eases from curve_scale_start (at curve_t_start) to curve_scale_end
    (at curve_t_end) with a linear / cosine / sigmoid profile; the result is
    clamped to [0.25, 4.0].
    """
    if not DSHF.curve_enable:
        return None
    t0, t1 = DSHF.curve_t_start, DSHF.curve_t_end
    if t0 == t1:
        # Degenerate interval: jump straight to the end scale.
        # Clamped for consistency with the interpolated paths below.
        return _clamp(float(DSHF.curve_scale_end), 0.25, 4.0)
    x = _clamp((ts - t1) / (t0 - t1), 0.0, 1.0)
    if DSHF.curve_type == "linear":
        w = x
    elif DSHF.curve_type == "cosine":
        # Plain scalar math instead of allocating torch tensors per call.
        w = 0.5 - 0.5 * math.cos(x * math.pi)
    else:  # "sigmoid"
        w = 1.0 / (1.0 + math.exp(-10.0 * (x - 0.5)))
    s = DSHF.curve_scale_start + (DSHF.curve_scale_end - DSHF.curve_scale_start) * w
    return _clamp(float(s), 0.25, 4.0)
|
| 275 |
+
|
| 276 |
+
@staticmethod
def _block_scale(depth: int, ts: float) -> Optional[float]:
    """Combined scale for a block at *depth* and timestep *ts*.

    The first enabled rule matching depth with ``timestep <= ts`` supplies a
    rule scale; the global curve supplies a curve scale. Returns whichever
    exists alone, their clamped product when both apply, or None when neither
    does.
    """
    rule_scale = next(
        (a.scale for a in DSHF.actions
         if a.enable and a.depth == depth and a.timestep <= ts),
        None,
    )
    curve_scale = DSHF._curve_weight(ts)
    if rule_scale is None:
        return curve_scale
    if curve_scale is None:
        return rule_scale
    return _clamp(rule_scale * curve_scale, 0.25, 4.0)
|
| 291 |
+
|
| 292 |
+
def _auto_scale_end(self, p) -> Optional[float]:
    """Suggest a curve_scale_end value from the hires-fix target resolution.

    Returns None when auto mode or the curve is disabled, when the target is
    not larger than the base resolution, or when *p* lacks usable dimensions.
    The suggestion is 1 + strength * (upscale_ratio - 1), capped at 1.7.
    """
    if not (self._inst_auto_end_enable and self._inst_curve_enable):
        return None
    try:
        base_w = int(getattr(p, "width", 0))
        base_h = int(getattr(p, "height", 0))
        if base_w <= 0 or base_h <= 0:
            return None
        target_w, target_h = base_w, base_h
        if getattr(p, "enable_hr", False):
            resize_x = int(getattr(p, "hr_resize_x", 0))
            resize_y = int(getattr(p, "hr_resize_y", 0))
            hr_scale = float(getattr(p, "hr_scale", 0.0) or 0.0)
            # Explicit pixel target wins over a scale factor.
            if resize_x > 0 and resize_y > 0:
                target_w, target_h = resize_x, resize_y
            elif hr_scale > 0.0:
                target_w = int(round(base_w * hr_scale))
                target_h = int(round(base_h * hr_scale))
        # Linear upscale ratio: sqrt of the pixel-count ratio.
        ratio = ((max(1, target_w * target_h)) / max(1, base_w * base_h)) ** 0.5
        if ratio <= 1.0:
            return None
        strength = _clamp(self._inst_auto_end_strength, 0.0, 1.0)
        return _clamp(1.0 + strength * (ratio - 1.0), 1.0, 1.7)
    except Exception:
        # Best effort: a malformed processing object just disables auto mode.
        return None
|
| 314 |
+
|
| 315 |
+
def process(self, p, *args):
    """Parse UI values from *args*, handle presets, and sync onto the class.

    *args* arrives in the exact order ui() built its ``flat`` list, so the
    ``nxt()`` calls below must not be reordered.
    """
    # Only act when our replacement U-Net is the active one
    if not isinstance(sd_unet.current_unet, DSHF.DeepShrinkHiresFixUNet):
        return

    it = iter(args)
    def nxt(): return next(it)

    # Global toggle
    enabled = bool(nxt())
    if not enabled:
        DSHF.enabled = False
        return

    # Stage values on instance fields so the class never holds stale garbage
    self._reset_instance_defaults()

    # ---- Rules ----
    rule_count = int(_clamp(_to_scalar(nxt()), 1, 8))
    tmp_rules: List[DSHFAction] = []
    # All 8 rule rows are always consumed; only the first rule_count are kept.
    for _ in range(8):
        en = bool(nxt()); ts = _to_scalar(nxt()); dp = int(_to_scalar(nxt())); sc = float(_to_scalar(nxt()))
        tmp_rules.append(DSHFAction(en, ts, dp, sc))
    self._inst_actions = tmp_rules[:rule_count]

    # ---- Curve ----
    self._inst_curve_enable = bool(nxt())
    self._inst_curve_type = str(nxt())
    self._inst_curve_t_start = _to_scalar(nxt())
    self._inst_curve_t_end = _to_scalar(nxt())
    self._inst_curve_scale_start = float(_to_scalar(nxt()))
    self._inst_curve_scale_end = float(_to_scalar(nxt()))
    self._inst_curve_alpha = float(_clamp(_to_scalar(nxt()), 0.0, 1.0))
    self._inst_min_feature_size = int(_clamp(_to_scalar(nxt()), 2, 256))
    self._inst_auto_end_enable = bool(nxt())
    self._inst_auto_end_strength = float(_clamp(_to_scalar(nxt()), 0.0, 1.0))

    # ---- Execution ----
    self._inst_interp_method = str(nxt())
    self._inst_interp_antialias = bool(nxt())
    self._inst_channels_last = bool(nxt())

    # ---- Experimental ----
    # Each textbox is parsed best-effort; a parse failure falls back to the
    # neutral value (the nxt() inside try has already consumed the arg, so
    # iterator position stays consistent).
    self._inst_exp_section_enable = bool(nxt())
    self._inst_exp_enable = bool(nxt())
    self._inst_exp_timestep = _to_scalar(nxt())
    try: self._inst_exp_scales = list(map(float, _parse_number_list(str(nxt()), as_int=False)))
    except Exception: self._inst_exp_scales = [1.0]
    try: self._inst_exp_dilations = list(map(int, _parse_number_list(str(nxt()), as_int=True)))
    except Exception: self._inst_exp_dilations = [1]
    try: self._inst_exp_in_muls = list(map(float, _parse_number_list(str(nxt()), as_int=False)))
    except Exception: self._inst_exp_in_muls = [1.0]
    try: self._inst_exp_out_muls = list(map(float, _parse_number_list(str(nxt()), as_int=False)))
    except Exception: self._inst_exp_out_muls = [1.0]
    try: self._inst_exp_cfg_muls = list(map(float, _parse_number_list(str(nxt()), as_int=False)))
    except Exception: self._inst_exp_cfg_muls = [1.0]

    # ---- Presets ----
    # "Сохранить"/"Загрузить" match the dropdown choices built in ui().
    preset_action = str(nxt() or "")
    preset_name = str(nxt() or "").strip()
    preset_json = str(nxt() or "").strip()
    preset_path = os.path.join(os.path.dirname(__file__), "dshf_presets.json")
    if preset_action == "Сохранить" and preset_name:
        data = self._export_profile_instance()
        try:
            cur = {"version": 1, "presets": {}}
            if os.path.exists(preset_path):
                with open(preset_path, "r", encoding="utf-8") as f:
                    cur = json.load(f)
            cur["presets"][preset_name] = data
            with open(preset_path, "w", encoding="utf-8") as f:
                json.dump(cur, f, ensure_ascii=False, indent=2)
            print(f"[DSHF] Пресет сохранён: {preset_name}")
        except Exception as e:
            print(f"[DSHF] Не удалось сохранить пресет: {e}")
    elif preset_action == "Загрузить":
        # Inline JSON takes precedence over a named preset from disk.
        if preset_json:
            try:
                prof = json.loads(preset_json)
                self._apply_profile_instance(prof)
                print("[DSHF] Профиль применён из JSON")
            except Exception as e:
                print(f"[DSHF] Ошибка JSON: {e}")
        elif preset_name:
            try:
                with open(preset_path, "r", encoding="utf-8") as f:
                    cur = json.load(f)
                prof = cur.get("presets", {}).get(preset_name)
                if prof:
                    self._apply_profile_instance(prof)
                    print(f"[DSHF] Профиль загружен: {preset_name}")
            except Exception as e:
                print(f"[DSHF] Не удалось загрузить пресет: {e}")

    # Auto-pick the curve's end scale from the hires target
    auto = self._auto_scale_end(p)
    if auto is not None:
        self._inst_curve_scale_end = float(auto)

    # -------- Sync instance -> class (this is what the U-Net reads) --------
    DSHF.enabled = True
    DSHF.actions = list(self._inst_actions)
    DSHF.curve_enable = bool(self._inst_curve_enable)
    DSHF.curve_type = str(self._inst_curve_type)
    DSHF.curve_t_start = float(self._inst_curve_t_start)
    DSHF.curve_t_end = float(self._inst_curve_t_end)
    DSHF.curve_scale_start = float(self._inst_curve_scale_start)
    DSHF.curve_scale_end = float(self._inst_curve_scale_end)
    DSHF.curve_alpha = float(self._inst_curve_alpha)
    DSHF.min_feature_size = int(self._inst_min_feature_size)
    DSHF.auto_end_enable = bool(self._inst_auto_end_enable)
    DSHF.auto_end_strength = float(self._inst_auto_end_strength)
    DSHF.interp_method = str(self._inst_interp_method)
    DSHF.interp_antialias = bool(self._inst_interp_antialias)
    DSHF.channels_last = bool(self._inst_channels_last)
    # experimental: exp_enable requires the section toggle as well
    DSHF.exp_section_enable = bool(self._inst_exp_section_enable)
    DSHF.exp_enable = bool(self._inst_exp_section_enable and self._inst_exp_enable)
    DSHF.exp_timestep = float(self._inst_exp_timestep)
    DSHF.exp_scales = list(self._inst_exp_scales)
    DSHF.exp_dilations = list(self._inst_exp_dilations)
    DSHF.exp_in_muls = list(self._inst_exp_in_muls)
    DSHF.exp_out_muls = list(self._inst_exp_out_muls)
    DSHF.exp_cfg_muls = list(self._inst_exp_cfg_muls)
|
| 439 |
+
|
| 440 |
+
# --------- Профили (instance-вариант, чтобы сохранять то, что в UI) ---------
|
| 441 |
+
|
| 442 |
+
def _export_profile_instance(self) -> Dict[str, Any]:
    """Serialize the staged ``_inst_*`` settings as a JSON-friendly profile.

    The layout mirrors what _apply_profile_instance() reads back.
    """
    curve = {
        "enable": self._inst_curve_enable,
        "type": self._inst_curve_type,
        "t_start": self._inst_curve_t_start,
        "t_end": self._inst_curve_t_end,
        "scale_start": self._inst_curve_scale_start,
        "scale_end": self._inst_curve_scale_end,
        "alpha": self._inst_curve_alpha,
        "min_feature": self._inst_min_feature_size,
        "auto_end_enable": self._inst_auto_end_enable,
        "auto_end_strength": self._inst_auto_end_strength,
    }
    runtime = {
        "interp_method": self._inst_interp_method,
        "interp_antialias": self._inst_interp_antialias,
        "channels_last": self._inst_channels_last,
    }
    experimental = {
        "section_enable": self._inst_exp_section_enable,
        "enable": self._inst_exp_enable,
        "timestep": self._inst_exp_timestep,
        "scales": self._inst_exp_scales,
        "dilations": self._inst_exp_dilations,
        "in_muls": self._inst_exp_in_muls,
        "out_muls": self._inst_exp_out_muls,
        "cfg_muls": self._inst_exp_cfg_muls,
    }
    return {
        "actions": [a.__dict__ for a in self._inst_actions],
        "curve": curve,
        "runtime": runtime,
        "experimental": experimental,
    }
|
| 462 |
+
|
| 463 |
+
def _apply_profile_instance(self, data: Dict[str, Any]) -> None:
    """Load a profile dict (the _export_profile_instance layout) into the
    staged ``_inst_*`` fields.

    A malformed profile aborts with a log line; fields assigned before the
    failure keep their new values (partial application by design of the
    single surrounding try block).
    """
    try:
        self._inst_actions = [DSHFAction(bool(a.get("enable", True)),
                                         float(a.get("timestep", 0)),
                                         int(a.get("depth", 0)),
                                         float(a.get("scale", 1.0)))
                              for a in data.get("actions", [])]
        c = data.get("curve", {})
        self._inst_curve_enable = bool(c.get("enable", False))
        self._inst_curve_type = str(c.get("type", "linear"))
        self._inst_curve_t_start = float(c.get("t_start", 800))
        self._inst_curve_t_end = float(c.get("t_end", 200))
        self._inst_curve_scale_start = float(c.get("scale_start", 1.0))
        self._inst_curve_scale_end = float(c.get("scale_end", 1.0))
        self._inst_curve_alpha = float(_clamp(float(c.get("alpha", 0.5)), 0.0, 1.0))
        self._inst_min_feature_size = int(_clamp(float(c.get("min_feature", 8)), 2, 256))
        self._inst_auto_end_enable = bool(c.get("auto_end_enable", False))
        self._inst_auto_end_strength = float(_clamp(float(c.get("auto_end_strength", 0.35)), 0.0, 1.0))
        r = data.get("runtime", {})
        # Runtime keys default to the current staged values, not constants.
        self._inst_interp_method = str(r.get("interp_method", self._inst_interp_method))
        self._inst_interp_antialias = bool(r.get("interp_antialias", self._inst_interp_antialias))
        self._inst_channels_last = bool(r.get("channels_last", self._inst_channels_last))
        e = data.get("experimental", {})
        self._inst_exp_section_enable = bool(e.get("section_enable", False))
        self._inst_exp_enable = bool(e.get("enable", False))
        self._inst_exp_timestep = float(e.get("timestep", 625))
        self._inst_exp_scales = list(map(float, e.get("scales", [1.0])))
        self._inst_exp_dilations = list(map(int, e.get("dilations", [1])))
        self._inst_exp_in_muls = list(map(float, e.get("in_muls", [1.0])))
        self._inst_exp_out_muls = list(map(float, e.get("out_muls", [1.0])))
        self._inst_exp_cfg_muls = list(map(float, e.get("cfg_muls", [1.0])))
    except Exception as ex:
        print(f"[DSHF] Ошибка применения профиля: {ex}")
|
| 496 |
+
|
| 497 |
+
# --------- Обёртки Conv2d (работают только при включённом experimental) ---------
|
| 498 |
+
|
| 499 |
+
class DSHF_Scale(torch.nn.Module):
    """Pre-conv wrapper for the experimental path.

    Downscales the feature map by the inverse of the per-conv scale and
    adjusts the wrapped conv's dilation before the conv runs. Active only
    while DSHF.exp_enable and the current timestep >= DSHF.exp_timestep.
    """
    def __init__(self, conv2d_ref: List[torch.nn.Conv2d], get_rt):
        super().__init__()
        # Wrapped conv held in a one-element list (a plain reference, so it
        # is not re-registered as a submodule of this wrapper).
        self.conv2d_ref = conv2d_ref
        self.get_rt = get_rt  # -> (mode, aa, min_feat)

    def forward(self, h: torch.Tensor):
        if not DSHF.exp_enable or DSHF.currentTimestep < DSHF.exp_timestep:
            return h
        mode, aa, min_feat = self.get_rt()
        # Per-conv index driven by the shared counter (incremented by DSHF_Unscale).
        idx = DSHF.currentConv
        pre_scale = 1.0 / _get_or_last(DSHF.exp_scales, idx, 1.0)
        if pre_scale != 1.0:
            h = _resize(h, pre_scale, mode, aa, min_feat)
        conv = self.conv2d_ref[0]
        k = conv.kernel_size if isinstance(conv.kernel_size, tuple) else (conv.kernel_size, conv.kernel_size)
        if max(k) > 1:
            # NOTE(review): mutates the conv in place and persists after the
            # pass; padding=(dil, dil) keeps output size only for 3x3 kernels
            # — confirm no larger kernels are wrapped.
            dil = int(_get_or_last(DSHF.exp_dilations, idx, 1))
            conv.dilation = (dil, dil); conv.padding = (dil, dil)
        else:
            conv.dilation = (1, 1); conv.padding = (0, 0)
        return h
|
| 521 |
+
|
| 522 |
+
class DSHF_Unscale(torch.nn.Module):
    """Post-conv wrapper for the experimental path.

    Scales the feature map back up by the per-conv scale, applies the
    alpha magnitude compensation, and advances the shared DSHF.currentConv
    counter (unconditionally, so per-conv indices stay consistent whether or
    not the experimental path is active).
    """
    def __init__(self, get_rt):
        super().__init__()
        self.get_rt = get_rt  # -> (mode, aa, min_feat)

    def forward(self, h: torch.Tensor):
        if not DSHF.exp_enable or DSHF.currentTimestep < DSHF.exp_timestep:
            # Counter still advances on the inactive path.
            DSHF.currentConv += 1; return h
        mode, aa, min_feat = self.get_rt()
        idx = DSHF.currentConv
        post_scale = _get_or_last(DSHF.exp_scales, idx, 1.0)
        if post_scale != 1.0:
            h = _resize(h, post_scale, mode, aa, min_feat)
        alpha = float(DSHF.curve_alpha)
        if alpha != 0.0:
            # Magnitude compensation: amplify activations by post_scale ** alpha.
            h = h * (post_scale ** alpha)
        DSHF.currentConv += 1
        return h
|
| 540 |
+
|
| 541 |
+
class DSHF_InMul(torch.nn.Module):
    """Multiplies a block's input by its per-block exp_in_muls factor while
    the experimental path is active."""
    def forward(self, h: torch.Tensor):
        if not DSHF.exp_enable or DSHF.currentTimestep < DSHF.exp_timestep:
            return h
        factor = float(_get_or_last(DSHF.exp_in_muls, DSHF.currentBlock, 1.0))
        if factor == 1.0:
            return h
        return h * factor
|
| 547 |
+
|
| 548 |
+
class DSHF_OutMul(torch.nn.Module):
    """Multiplies a block's output by its per-block exp_out_muls factor while
    the experimental path is active."""
    def forward(self, h: torch.Tensor):
        if not DSHF.exp_enable or DSHF.currentTimestep < DSHF.exp_timestep:
            return h
        factor = float(_get_or_last(DSHF.exp_out_muls, DSHF.currentBlock, 1.0))
        if factor == 1.0:
            return h
        return h * factor
|
| 554 |
+
|
| 555 |
+
# ---------------- Подменённый U-Net ----------------
|
| 556 |
+
|
| 557 |
+
class DeepShrinkHiresFixUNet(sd_unet.SdUnet):
    """U-Net replacement that injects Deep Shrink scaling hooks.

    Wraps every eligible Conv2d of the wrapped diffusion model in a
    (DSHF_Scale -> conv -> DSHF_Unscale [-> multiplier]) sandwich, then
    re-implements forward() so the feature map can be shrunk per depth
    level while denoising.  The wrapping logic, previously triplicated
    verbatim for input/middle/output blocks, is factored into
    _wrap_block/_sandwich; the per-block CFG-context computation,
    previously triplicated in forward(), is factored into _block_context.
    The public interface (__init__(_model), forward(x, timesteps, context,
    y=None, **kwargs)) is unchanged.
    """

    def __init__(self, _model):
        super().__init__()
        self.model = _model.to(devices.device)
        # Runtime getter for (interp_method, antialias, min_feature_size);
        # read lazily so settings changes take effect without re-wrapping.
        self._getter = lambda: (DSHF.interp_method, DSHF.interp_antialias, DSHF.min_feature_size)

        # Wrap convolutions in every stage of the U-Net.
        for block in self.model.input_blocks:
            self._wrap_block(block)
        self._wrap_block(self.model.middle_block)
        for block in self.model.output_blocks:
            self._wrap_block(block)
        for i, module in enumerate(self.model.out):
            if isinstance(module, torch.nn.Conv2d):
                self.model.out[i] = self._sandwich(module)

    def _sandwich(self, conv, *tail):
        """Return conv wrapped as Scale -> conv -> Unscale [-> tail modules]."""
        return torch.nn.Sequential(
            DSHF.DSHF_Scale([conv], self._getter),
            conv,
            DSHF.DSHF_Unscale(self._getter),
            *tail,
        )

    def _wrap_block(self, block):
        """Wrap every eligible conv found directly inside one U-Net block."""
        for j, layer in enumerate(block):
            if isinstance(layer, ResBlock):
                # ResBlock convs additionally get the per-block multipliers.
                for k, sub in enumerate(layer.in_layers):
                    if isinstance(sub, torch.nn.Conv2d):
                        layer.in_layers[k] = self._sandwich(sub, DSHF.DSHF_InMul())
                for k, sub in enumerate(layer.out_layers):
                    if isinstance(sub, torch.nn.Conv2d):
                        layer.out_layers[k] = self._sandwich(sub, DSHF.DSHF_OutMul())
            else:
                if isinstance(layer, torch.nn.Conv2d):
                    block[j] = self._sandwich(layer)
                if isinstance(layer, Downsample):
                    # NOTE(review): op is wrapped even if it is not a conv
                    # (Downsample may use pooling) — matches prior behavior.
                    layer.op = self._sandwich(layer.op)
                if isinstance(layer, Upsample) and hasattr(layer, "conv") and isinstance(layer.conv, torch.nn.Conv2d):
                    layer.conv = self._sandwich(layer.conv)

    def _block_context(self, context):
        """Return context, scaled by the per-block CFG multiplier when the
        experimental mode is active; otherwise return it unchanged."""
        if DSHF.exp_enable and DSHF.currentTimestep >= DSHF.exp_timestep:
            cfg_mul = _get_or_last(DSHF.exp_cfg_muls, DSHF.currentBlock, 1.0)
            if cfg_mul != 1.0:
                return context * float(cfg_mul)
        return context

    def forward(self, x, timesteps, context, y=None, **kwargs):
        assert (y is not None) == (self.model.num_classes is not None), "must specify y iff class-conditional"
        if DSHF.channels_last:
            x = x.contiguous(memory_format=torch.channels_last)

        hs = []
        emb = self.model.time_embed(timestep_embedding(timesteps, self.model.model_channels, repeat_only=False))
        if self.model.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.model.label_emb(y)

        h = x.type(self.model.dtype)
        depth = 0
        DSHF.currentBlock = 0
        DSHF.currentConv = 0
        # Use the batch-minimum timestep as the activity gate for all hooks.
        DSHF.currentTimestep = float(timesteps.detach().float().min().item())

        # Input blocks: shrink h before each block when a depth scale is active.
        # The shrunk sizes are what gets stored in the skip connections.
        for module in self.model.input_blocks:
            s = DSHF._block_scale(depth, DSHF.currentTimestep)
            if s is not None:
                h = _resize(h, 1.0 / float(s), DSHF.interp_method, DSHF.interp_antialias, DSHF.min_feature_size)
            h = module(h, emb, self._block_context(context))
            hs.append(h)
            depth += 1
            DSHF.currentBlock += 1

        # Middle block: shrink before, restore after.
        s = DSHF._block_scale(depth, DSHF.currentTimestep)
        if s is not None:
            h = _resize(h, 1.0 / float(s), DSHF.interp_method, DSHF.interp_antialias, DSHF.min_feature_size)
        h = self.model.middle_block(h, emb, self._block_context(context))
        s = DSHF._block_scale(depth, DSHF.currentTimestep)
        if s is not None:
            h = _resize(h, float(s), DSHF.interp_method, DSHF.interp_antialias, DSHF.min_feature_size)
        DSHF.currentBlock += 1

        # Output blocks: concatenate the skip, run the block, then restore
        # the size that the matching input-block shrink removed.
        for module in self.model.output_blocks:
            depth -= 1
            h = torch.cat([h, hs.pop()], dim=1)
            h = module(h, emb, self._block_context(context))
            s = DSHF._block_scale(depth, DSHF.currentTimestep)
            if s is not None:
                h = _resize(h, float(s), DSHF.interp_method, DSHF.interp_antialias, DSHF.min_feature_size)
            DSHF.currentBlock += 1

        h = h.type(x.dtype)
        return self.model.id_predictor(h) if self.model.predict_codebook_ids else self.model.out(h)
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
# Register the Deep Shrink variant of the U-Net with the webui.
DeepShrinkHiresFixUNetOption = sd_unet.SdUnetOption()
DeepShrinkHiresFixUNetOption.label = "Deep Shrink Hires.fix"
DeepShrinkHiresFixUNetOption.create_unet = lambda: DSHF.DeepShrinkHiresFixUNet(shared.sd_model.model.diffusion_model)


def _append_dshf_unet(unets):
    # Invoked by the webui when building the list of selectable U-Nets.
    unets.append(DeepShrinkHiresFixUNetOption)


script_callbacks.on_list_unets(_append_dshf_unet)
|
DeepShrinkHires2.0.fix/scripts/__pycache__/DeepShrinkHires.fix.cpython-310.pyc
ADDED
|
Binary file (23.6 kB). View file
|
|
|