Goodis committed (verified)
Commit: ca2a3d8
1 parent: fe95e6f

Upload 55 files

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete set.
Files changed (50)
  1. SwarmBlending.py +39 -0
  2. SwarmClipSeg.py +71 -0
  3. SwarmExtractLora.py +146 -0
  4. SwarmImages.py +379 -0
  5. SwarmInputNodes.py +299 -0
  6. SwarmInternalUtil.py +111 -0
  7. SwarmKSampler.py +356 -0
  8. SwarmLatents.py +37 -0
  9. SwarmLoadImageB64.py +47 -0
  10. SwarmLoraLoader.py +56 -0
  11. SwarmMasks.py +287 -0
  12. SwarmMath.py +23 -0
  13. SwarmReference.py +56 -0
  14. SwarmSaveImageWS.py +152 -0
  15. SwarmTextHandling.py +211 -0
  16. SwarmTiling.py +86 -0
  17. SwarmUnsampler.py +50 -0
  18. __init__.py +34 -0
  19. __pycache__/SwarmBlending.cpython-310.pyc +0 -0
  20. __pycache__/SwarmBlending.cpython-313.pyc +0 -0
  21. __pycache__/SwarmClipSeg.cpython-310.pyc +0 -0
  22. __pycache__/SwarmClipSeg.cpython-313.pyc +0 -0
  23. __pycache__/SwarmExtractLora.cpython-310.pyc +0 -0
  24. __pycache__/SwarmExtractLora.cpython-313.pyc +0 -0
  25. __pycache__/SwarmImages.cpython-310.pyc +0 -0
  26. __pycache__/SwarmImages.cpython-313.pyc +0 -0
  27. __pycache__/SwarmInputNodes.cpython-310.pyc +0 -0
  28. __pycache__/SwarmInputNodes.cpython-313.pyc +0 -0
  29. __pycache__/SwarmInternalUtil.cpython-310.pyc +0 -0
  30. __pycache__/SwarmInternalUtil.cpython-313.pyc +0 -0
  31. __pycache__/SwarmKSampler.cpython-310.pyc +0 -0
  32. __pycache__/SwarmKSampler.cpython-313.pyc +0 -0
  33. __pycache__/SwarmLatents.cpython-310.pyc +0 -0
  34. __pycache__/SwarmLatents.cpython-313.pyc +0 -0
  35. __pycache__/SwarmLoadImageB64.cpython-310.pyc +0 -0
  36. __pycache__/SwarmLoadImageB64.cpython-313.pyc +0 -0
  37. __pycache__/SwarmLoraLoader.cpython-310.pyc +0 -0
  38. __pycache__/SwarmLoraLoader.cpython-313.pyc +0 -0
  39. __pycache__/SwarmMasks.cpython-310.pyc +0 -0
  40. __pycache__/SwarmMasks.cpython-313.pyc +0 -0
  41. __pycache__/SwarmMath.cpython-310.pyc +0 -0
  42. __pycache__/SwarmMath.cpython-313.pyc +0 -0
  43. __pycache__/SwarmReference.cpython-310.pyc +0 -0
  44. __pycache__/SwarmReference.cpython-313.pyc +0 -0
  45. __pycache__/SwarmSaveImageWS.cpython-310.pyc +0 -0
  46. __pycache__/SwarmSaveImageWS.cpython-313.pyc +0 -0
  47. __pycache__/SwarmTextHandling.cpython-310.pyc +0 -0
  48. __pycache__/SwarmTextHandling.cpython-313.pyc +0 -0
  49. __pycache__/SwarmTiling.cpython-310.pyc +0 -0
  50. __pycache__/SwarmTiling.cpython-313.pyc +0 -0
SwarmBlending.py ADDED
@@ -0,0 +1,39 @@
import torch

class SwarmLatentBlendMasked:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "samples0": ("LATENT",),
                "samples1": ("LATENT",),
                "mask": ("MASK",),
                "blend_factor": ("FLOAT", { "default": 0.5, "min": 0, "max": 1, "step": 0.01, "tooltip": "The blend factor between the two samples. 0 means entirely use sample0, 1 means entirely sample1, 0.5 means 50/50 of each." }),
            }
        }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"
    CATEGORY = "SwarmUI/images"
    DESCRIPTION = "Blends two latent images together within a masked region."

    def blend(self, samples0, samples1, blend_factor, mask):
        samples_out = samples0.copy()
        samples0 = samples0["samples"]
        samples1 = samples1["samples"]
        while mask.ndim < 4:
            mask = mask.unsqueeze(0)

        if samples0.shape != samples1.shape:
            samples1 = torch.nn.functional.interpolate(samples1, size=(samples0.shape[2], samples0.shape[3]), mode="bicubic")
        if samples0.shape != mask.shape:
            mask = torch.nn.functional.interpolate(mask, size=(samples0.shape[2], samples0.shape[3]), mode="bicubic")

        samples_blended = samples0 * (1 - mask * blend_factor) + samples1 * (mask * blend_factor)
        samples_out["samples"] = samples_blended
        return (samples_out,)


NODE_CLASS_MAPPINGS = {
    "SwarmLatentBlendMasked": SwarmLatentBlendMasked,
}
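As a quick sanity check of the blend formula above, here is a minimal standalone sketch (not part of the commit; the shapes and values are illustrative assumptions) that applies the same samples0 * (1 - mask * blend_factor) + samples1 * (mask * blend_factor) math to dummy latents:

import torch

# Hypothetical standalone check of the masked-blend formula used by SwarmLatentBlendMasked.
samples0 = {"samples": torch.zeros(1, 4, 64, 64)}
samples1 = {"samples": torch.ones(1, 4, 64, 64)}
mask = torch.zeros(1, 1, 64, 64)
mask[:, :, :, 32:] = 1.0  # mask the right half
blend_factor = 0.5

blended = samples0["samples"] * (1 - mask * blend_factor) + samples1["samples"] * (mask * blend_factor)
print(blended[0, 0, 0, 0].item())   # 0.0 -> unmasked left half keeps samples0
print(blended[0, 0, 0, 63].item())  # 0.5 -> masked right half is a 50/50 mix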
SwarmClipSeg.py ADDED
@@ -0,0 +1,71 @@
import torch
from PIL import Image
import numpy as np
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
import folder_paths
import os, requests

def get_path():
    if "clipseg" in folder_paths.folder_names_and_paths:
        paths = folder_paths.folder_names_and_paths["clipseg"]
        return paths[0][0]
    else:
        # Jank backup path if you're not running properly in Swarm
        path = os.path.dirname(os.path.realpath(__file__)) + "/models"
        return path


# Manual download of the model from a safetensors conversion.
# Done manually to guarantee it's only a safetensors file ever and not a pickle
def download_model(path, urlbase):
    if os.path.exists(path):
        return
    for file in ["config.json", "merges.txt", "model.safetensors", "preprocessor_config.json", "special_tokens_map.json", "tokenizer_config.json", "vocab.json"]:
        os.makedirs(path, exist_ok=True)
        filepath = path + file
        if not os.path.exists(filepath):
            with open(filepath, "wb") as f:
                print(f"[SwarmClipSeg] Downloading '{file}'...")
                f.write(requests.get(f"{urlbase}{file}").content)


class SwarmClipSeg:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE",),
                "match_text": ("STRING", {"multiline": True, "tooltip": "A short description (a few words) to describe something within the image to find and mask."}),
                "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step":0.01, "round": False, "tooltip": "Threshold to apply to the mask, higher values will make the mask more strict. Without sufficient thresholding, CLIPSeg may include random stray content around the edges."}),
            }
        }

    CATEGORY = "SwarmUI/masks"
    RETURN_TYPES = ("MASK",)
    FUNCTION = "seg"
    DESCRIPTION = "Segment an image using CLIPSeg, creating a mask of what part of an image appears to match the given text."

    def seg(self, images, match_text, threshold):
        # TODO: Batch support?
        i = 255.0 * images[0].cpu().numpy()
        img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
        # TODO: Cache the model in RAM in some way?
        path = get_path() + "/clipseg-rd64-refined-fp16-safetensors/"
        download_model(path, "https://huggingface.co/mcmonkey/clipseg-rd64-refined-fp16/resolve/main/")
        processor = CLIPSegProcessor.from_pretrained(path)
        model = CLIPSegForImageSegmentation.from_pretrained(path)
        with torch.no_grad():
            mask = model(**processor(text=match_text, images=img, return_tensors="pt", padding=True))[0]
        mask = torch.nn.functional.threshold(mask.sigmoid(), threshold, 0)
        mask -= mask.min()
        max = mask.max()
        if max > 0:
            mask /= max
        while mask.ndim < 4:
            mask = mask.unsqueeze(0)
        mask = torch.nn.functional.interpolate(mask, size=(images.shape[1], images.shape[2]), mode="bilinear").squeeze(0)
        return (mask,)

NODE_CLASS_MAPPINGS = {
    "SwarmClipSeg": SwarmClipSeg,
}
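For reference, the same threshold-and-normalize masking pipeline can be sketched outside of ComfyUI with plain transformers. This is a minimal sketch, not the node's code: it assumes the upstream CIDAS/clipseg-rd64-refined checkpoint (rather than Swarm's safetensors mirror) and a hypothetical local image file.

import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

# Assumption: upstream CLIPSeg checkpoint, downloaded by from_pretrained itself.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

img = Image.open("example.png").convert("RGB")  # hypothetical input image
with torch.no_grad():
    logits = model(**processor(text="a dog", images=img, return_tensors="pt", padding=True)).logits
mask = torch.nn.functional.threshold(logits.sigmoid(), 0.5, 0)  # drop weak activations
mask -= mask.min()
if mask.max() > 0:
    mask /= mask.max()  # normalize to 0..1, same as the node does before resizing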
SwarmExtractLora.py ADDED
@@ -0,0 +1,146 @@
import comfy.model_management
import safetensors.torch
import torch, os, comfy, json

# ATTRIBUTION: This code is a mix of code from kohya-ss, comfy, and Swarm. It would be annoying to disentangle but it's all FOSS and relatively short so it's fine.

CLAMP_QUANTILE = 0.99
def extract_lora(diff, rank):
    conv2d = (len(diff.shape) == 4)
    kernel_size = None if not conv2d else diff.size()[2:4]
    conv2d_3x3 = conv2d and kernel_size != (1, 1)
    out_dim, in_dim = diff.size()[0:2]
    rank = min(rank, in_dim, out_dim)

    if conv2d:
        if conv2d_3x3:
            diff = diff.flatten(start_dim=1)
        else:
            diff = diff.squeeze()

    U, S, Vh = torch.linalg.svd(diff.float())
    U = U[:, :rank]
    S = S[:rank]
    U = U @ torch.diag(S)
    Vh = Vh[:rank, :]

    dist = torch.cat([U.flatten(), Vh.flatten()])
    hi_val = torch.quantile(dist, CLAMP_QUANTILE)
    low_val = -hi_val

    U = U.clamp(low_val, hi_val)
    Vh = Vh.clamp(low_val, hi_val)
    if conv2d:
        U = U.reshape(out_dim, rank, 1, 1)
        Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1])
    return (U, Vh)


def do_lora_handle(base_data, other_data, rank, prefix, require, do_bias, callback):
    out_data = {}
    device = comfy.model_management.get_torch_device()
    for key in base_data.keys():
        callback()
        if key not in other_data:
            continue
        base_tensor = base_data[key].float()
        other_tensor = other_data[key].float()
        if key.startswith("clip_g"):
            key = "1." + key[len("clip_g."):]
        elif key.startswith("clip_l"):
            key = "0." + key[len("clip_l."):]
        if require:
            if not key.startswith(require):
                print(f"Ignore unmatched key {key} (doesn't match {require})")
                continue
            key = key[len(require):]
        if base_tensor.shape != other_tensor.shape:
            continue
        diff = other_tensor.to(device) - base_tensor.to(device)
        other_tensor = other_tensor.cpu()
        base_tensor = base_tensor.cpu()
        max_diff = float(diff.abs().max())
        if max_diff < 1e-5:
            print(f"discard unaltered key {key} ({max_diff})")
            continue
        if key.endswith(".weight"):
            fixed_key = key[:-len(".weight")].replace('.', '_')
            name = f"lora_{prefix}_{fixed_key}"
            if len(base_tensor.shape) >= 2:
                print(f"extract key {name} ({max_diff})")
                out = extract_lora(diff, rank)
                out_data[f"{name}.lora_up.weight"] = out[0].contiguous().half().cpu()
                out_data[f"{name}.lora_down.weight"] = out[1].contiguous().half().cpu()
            else:
                print(f"ignore valid raw pass-through key {name} ({max_diff})")
                #out_data[name] = other_tensor.contiguous().half().cpu()
        elif key.endswith(".bias") and do_bias:
            fixed_key = key[:-len(".bias")].replace('.', '_')
            name = f"lora_{prefix}_{fixed_key}"
            print(f"extract bias key {name} ({max_diff})")
            out_data[f"{name}.diff_b"] = diff.contiguous().half().cpu()


    return out_data

class SwarmExtractLora:
    def __init__(self):
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "base_model": ("MODEL", ),
                "base_model_clip": ("CLIP", ),
                "other_model": ("MODEL", ),
                "other_model_clip": ("CLIP", ),
                "rank": ("INT", {"default": 16, "min": 1, "max": 320}),
                "save_rawpath": ("STRING", {"multiline": False}),
                "save_filename": ("STRING", {"multiline": False}),
                "save_clip": ("BOOLEAN", {"default": True}),
                "metadata": ("STRING", {"multiline": True}),
            }
        }

    CATEGORY = "SwarmUI/models"
    RETURN_TYPES = ()
    FUNCTION = "extract_lora"
    OUTPUT_NODE = True
    DESCRIPTION = "Internal node, do not use directly - extracts a LoRA from the difference between two models. This is used by SwarmUI Utilities tab."

    def extract_lora(self, base_model, base_model_clip, other_model, other_model_clip, rank, save_rawpath, save_filename, save_clip, metadata):
        base_data = base_model.model_state_dict()
        other_data = other_model.model_state_dict()
        key_count = len(base_data.keys())
        if save_clip:
            key_count += len(base_model_clip.get_sd().keys())
        pbar = comfy.utils.ProgressBar(key_count)
        class Helper:
            steps = 0
            def callback(self):
                self.steps += 1
                pbar.update_absolute(self.steps, key_count, None)
        helper = Helper()
        out_data = do_lora_handle(base_data, other_data, rank, "unet", "diffusion_model.", True, lambda: helper.callback())
        if save_clip:
            # TODO: CLIP keys get wonky, this probably doesn't work? Model-arch-dependent.
            out_clip = do_lora_handle(base_model_clip.get_sd(), other_model_clip.get_sd(), rank, "te_text_model_encoder_layers", "0.transformer.text_model.encoder.layers.", False, lambda: helper.callback())
            out_clip = do_lora_handle(base_model_clip.get_sd(), other_model_clip.get_sd(), rank, "te2_text_model_encoder_layers", "1.transformer.text_model.encoder.layers.", False, lambda: helper.callback())
            out_data.update(out_clip)

        # Can't easily autodetect all the correct modelspec info, but at least supply some basics
        out_metadata = {
            "modelspec.title": f"(Extracted LoRA) {save_filename}",
            "modelspec.description": f"LoRA extracted in SwarmUI"
        }
        if metadata:
            out_metadata.update(json.loads(metadata))
        path = f"{save_rawpath}{save_filename}.safetensors"
        print(f"saving to path {path}")
        safetensors.torch.save_file(out_data, path, metadata=out_metadata)
        return ()

NODE_CLASS_MAPPINGS = {
    "SwarmExtractLora": SwarmExtractLora,
}
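The core idea in extract_lora is a truncated SVD of the weight difference: the first rank columns of U (scaled by the singular values) become lora_up, and the first rank rows of Vh become lora_down. A tiny self-contained sketch (not part of the commit, with an artificially low-rank random matrix as the assumed input) shows why the pair reproduces the difference:

import torch

torch.manual_seed(0)
rank = 8
# A genuinely rank-8 "weight diff" built from two random factors.
diff = torch.randn(320, rank) @ torch.randn(rank, 640)

U, S, Vh = torch.linalg.svd(diff)
up = U[:, :rank] @ torch.diag(S[:rank])   # analogous to lora_up.weight
down = Vh[:rank, :]                       # analogous to lora_down.weight

# Max reconstruction error is near float rounding: the rank-8 pair reproduces the diff.
print((up @ down - diff).abs().max())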
SwarmImages.py ADDED
@@ -0,0 +1,379 @@
import torch
import comfy
import math
from nodes import MAX_RESOLUTION

class SwarmImageScaleForMP:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "width": ("INT", {"default": 0, "min": 0, "max": 8192, "tooltip": "The target width of the image."}),
                "height": ("INT", {"default": 0, "min": 0, "max": 8192, "tooltip": "The target height of the image."}),
                "can_shrink": ("BOOLEAN", {"default": True, "tooltip": "If true, the image can be shrunk to fit the target size, otherwise it will only be scaled up or left the same."}),
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "scale"
    DESCRIPTION = "Scales an image to a target width and height, while keeping the aspect ratio."

    def scale(self, image, width, height, can_shrink):
        mpTarget = width * height
        oldWidth = image.shape[2]
        oldHeight = image.shape[1]

        scale = math.sqrt(mpTarget / (oldWidth * oldHeight))
        if not can_shrink and scale < 1:
            return (image,)
        newWid = int(round(oldWidth * scale / 64) * 64)
        newHei = int(round(oldHeight * scale / 64) * 64)
        samples = image.movedim(-1, 1)
        s = comfy.utils.common_upscale(samples, newWid, newHei, "bilinear", "disabled")
        s = s.movedim(1, -1)
        return (s,)


class SwarmImageCrop:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "x": ("INT", {"default": 0, "min": 0, "max": 8192, "step": 8, "tooltip": "The x coordinate in pixels of the top left corner of the crop."}),
                "y": ("INT", {"default": 0, "min": 0, "max": 8192, "step": 8, "tooltip": "The y coordinate in pixels of the top left corner of the crop."}),
                "width": ("INT", {"default": 512, "min": 64, "max": 8192, "step": 8, "tooltip": "The width in pixels of the crop."}),
                "height": ("INT", {"default": 512, "min": 64, "max": 8192, "step": 8, "tooltip": "The height in pixels of the crop."}),
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "crop"
    DESCRIPTION = "Crops an image to a specific region."

    def crop(self, image, x, y, width, height):
        if width <= 0 or height <= 0:
            return (image,)
        to_x = width + x
        to_y = height + y
        img = image[:, y:to_y, x:to_x, :]
        return (img,)


class SwarmVideoBoomerang:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE",),
            }
        }

    CATEGORY = "SwarmUI/video"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "boomerang"
    DESCRIPTION = "Creates a boomerang effect by having the video play in reverse after the end, as a simple trick to make it appear to loop smoothly forever."

    def boomerang(self, images):
        # return images followed by reverse images
        images = torch.cat((images, images.flip(0)), 0)
        return (images,)


class SwarmImageNoise:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "amount": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 10.0, "step": 0.01, "round": False}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
            },
            "optional": {
                "mask": ("MASK",)
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "add_noise"
    DESCRIPTION = "Adds random noise to an image."

    def add_noise(self, image, amount, seed, mask=None):
        generator = torch.manual_seed(seed)
        while image.dim() < 4:
            image = image.unsqueeze(0)
        noise = torch.randn(image.size(), dtype=image.dtype, layout=image.layout, generator=generator, device="cpu") * amount
        if mask is not None:
            while mask.dim() < 4:
                mask = mask.unsqueeze(0)
            mask = torch.nn.functional.interpolate(mask.to(image.device), size=(image.shape[1], image.shape[2]), mode="bicubic")
            if image.shape[3] == 3 and image.shape[1] > 3: # (channels-last)
                mask = mask.movedim(1, -1)
            noise = noise * mask
        img = image + noise.to(image.device)
        img = torch.clamp(img, 0, 1)
        return (img,)


class SwarmTrimFrames:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "trim_start": ("INT", {"default": 0, "min": 0, "max": 4096}),
                "trim_end": ("INT", {"default": 0, "min": 0, "max": 4096})
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "trim"
    DESCRIPTION = "Trims frames from the start and end of a video."

    def trim(self, image, trim_start, trim_end):
        if image.shape[0] <= 1:
            return (image,)
        s_in = image
        start = max(0, min(s_in.shape[0], trim_start))
        end = max(0, min(s_in.shape[0], trim_end))
        s = s_in[start:s_in.shape[0] - end].clone()
        return (s,)


class SwarmCountFrames:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",)
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("INT",)
    FUNCTION = "count"
    DESCRIPTION = "Counts the number of frames in an image."

    def count(self, image):
        return (image.shape[0],)


class SwarmImageWidth:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",)
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("INT",)
    FUNCTION = "get_width"
    DESCRIPTION = "Gets the width of an image."

    def get_width(self, image):
        return (image.shape[-2],)


class SwarmImageHeight:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",)
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("INT",)
    FUNCTION = "get_height"
    DESCRIPTION = "Gets the height of an image."

    def get_height(self, image):
        return (image.shape[-3],)


class SwarmImageCompositeMaskedColorCorrecting:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "destination": ("IMAGE",),
                "source": ("IMAGE",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "mask": ("MASK",),
                "correction_method": (["None", "Uniform", "Linear", "Linear2"], )
            }
        }

    CATEGORY = "SwarmUI/images"
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "composite"
    DESCRIPTION = "Works like ImageCompositeMasked, but does color correction for inpainted images (ie outside-the-mask areas are expected to be identical)"

    def composite(self, destination, source, x, y, mask, correction_method):
        destination = destination.clone().movedim(-1, 1)
        source = source.clone().movedim(-1, 1).to(destination.device)
        source = comfy.utils.repeat_to_batch_size(source, destination.shape[0])

        x = max(-source.shape[3], min(x, destination.shape[3]))
        y = max(-source.shape[2], min(y, destination.shape[2]))

        left, top = (x, y)
        right, bottom = (left + source.shape[3], top + source.shape[2],)

        mask = mask.to(destination.device, copy=True)
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear")
        mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0])

        visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)

        mask = mask[:, :, :visible_height, :visible_width]
        inverse_mask = torch.ones_like(mask) - mask

        source_section = source[:, :, :visible_height, :visible_width]
        dest_section = destination[:, :, top:bottom, left:right]

        # Fall through on "None"
        if correction_method == "Uniform":
            source_section = color_correct_uniform(source_section, dest_section, inverse_mask)
        elif correction_method == "Linear":
            source_section = color_correct_linear(source_section, dest_section, inverse_mask)
        elif correction_method == "Linear2":
            source_section = color_correct_linear2(source_section, dest_section, inverse_mask)

        source_portion = mask * source_section
        destination_portion = inverse_mask * dest_section

        destination[:, :, top:bottom, left:right] = source_portion + destination_portion
        return (destination.movedim(1, -1),)


def color_correct_uniform(source_section: torch.Tensor, dest_section: torch.Tensor, inverse_mask: torch.Tensor) -> torch.Tensor:
    thresholded = (inverse_mask.clamp(0, 1) - 0.9999).clamp(0, 1) * 10000
    thresholded_sum = thresholded.sum()
    if thresholded_sum > 50:
        source_hsv = rgb2hsv(source_section)
        dest_hsv = rgb2hsv(dest_section)
        source_hsv_masked = source_hsv * thresholded
        dest_hsv_masked = dest_hsv * thresholded
        diff = dest_hsv_masked - source_hsv_masked
        diff = diff.sum(dim=[0, 2, 3]) / thresholded_sum
        diff[0] = 0.0
        diff = diff.unsqueeze(0).unsqueeze(2).unsqueeze(2)
        source_hsv = source_hsv + diff
        source_hsv = source_hsv.clamp(0, 1)
        source_section = hsv2rgb(source_hsv)
    return source_section


def color_correct_linear(source_section: torch.Tensor, dest_section: torch.Tensor, inverse_mask: torch.Tensor) -> torch.Tensor:
    thresholded = (inverse_mask.clamp(0, 1) - 0.9999).clamp(0, 1) * 10000
    thresholded_sum = thresholded.sum()
    if thresholded_sum > 50:
        source_hsv = rgb2hsv(source_section)
        dest_hsv = rgb2hsv(dest_section)
        source_h = source_hsv[:, 0:1, :, :]
        source_s = linear_fit(source_hsv[:, 1:2, :, :], dest_hsv[:, 1:2, :, :], thresholded)
        source_v = linear_fit(source_hsv[:, 2:3, :, :], dest_hsv[:, 2:3, :, :], thresholded)
        source_hsv = torch.cat([source_h, source_s, source_v], dim=1)
        source_section = hsv2rgb(source_hsv)
    return source_section


# like color_correct_linear, but fits s*v and v instead of s and v to avoid instability from dark pixels
def color_correct_linear2(source_section: torch.Tensor, dest_section: torch.Tensor, inverse_mask: torch.Tensor) -> torch.Tensor:
    thresholded = (inverse_mask.clamp(0, 1) - 0.9999).clamp(0, 1) * 10000
    thresholded_sum = thresholded.sum()
    if thresholded_sum > 50:
        source_hsv = rgb2hsv(source_section)
        dest_hsv = rgb2hsv(dest_section)
        source_h = source_hsv[:, 0:1, :, :]
        source_sv_mul = source_hsv[:, 1:2, :, :] * source_hsv[:, 2:3, :, :]
        dest_sv_mul = dest_hsv[:, 1:2, :, :] * dest_hsv[:, 2:3, :, :]
        source_sv_mul = linear_fit(source_sv_mul, dest_sv_mul, thresholded)
        source_v = linear_fit(source_hsv[:, 2:3, :, :], dest_hsv[:, 2:3, :, :], thresholded)
        source_s = torch.zeros_like(source_sv_mul)
        source_s[source_v != 0] = source_sv_mul[source_v != 0] / source_v[source_v != 0]
        source_s = source_s.clamp(0, 1)
        source_hsv = torch.cat([source_h, source_s, source_v], dim=1)
        source_section = hsv2rgb(source_hsv)
    return source_section


def linear_fit(source_component: torch.Tensor, dest_component: torch.Tensor, thresholded: torch.Tensor) -> torch.Tensor:
    thresholded_sum = thresholded.sum()
    source_masked = source_component * thresholded
    dest_masked = dest_component * thresholded
    # Simple linear regression on dest as a function of source
    source_mean = source_masked.sum(dim=[0, 2, 3]) / thresholded_sum
    dest_mean = dest_masked.sum(dim=[0, 2, 3]) / thresholded_sum
    source_mean = source_mean.unsqueeze(0).unsqueeze(2).unsqueeze(2)
    dest_mean = dest_mean.unsqueeze(0).unsqueeze(2).unsqueeze(2)
    source_deviation = (source_component - source_mean) * thresholded
    dest_deviation = (dest_component - dest_mean) * thresholded
    numerator = torch.sum(source_deviation * dest_deviation, (0, 2, 3))
    denominator = torch.sum(source_deviation * source_deviation, (0, 2, 3))
    # When all src the same color, we fall back to assuming m = 1 (uniform offset)
    m = torch.where(denominator != 0, numerator / denominator, torch.tensor(1.0))
    m = m.unsqueeze(0).unsqueeze(2).unsqueeze(2)
    b = dest_mean - source_mean * m
    source_component = m * source_component + b
    source_component = source_component.clamp(0, 1)
    return source_component


# from https://github.com/limacv/RGB_HSV_HSL
def rgb2hsv(rgb: torch.Tensor) -> torch.Tensor:
    cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)
    cmin = torch.min(rgb, dim=1, keepdim=True)[0]
    delta = cmax - cmin
    hsv_h = torch.empty_like(rgb[:, 0:1, :, :])
    cmax_idx[delta == 0] = 3
    hsv_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]
    hsv_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]
    hsv_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]
    hsv_h[cmax_idx == 3] = 0.0
    hsv_h /= 6.0
    hsv_s = torch.where(cmax == 0, torch.tensor(0.0).type_as(rgb), delta / cmax)
    hsv_v = cmax
    return torch.cat([hsv_h, hsv_s, hsv_v], dim=1)


def hsv2rgb(hsv: torch.Tensor) -> torch.Tensor:
    hsv_h, hsv_s, hsv_l = hsv[:, 0:1], hsv[:, 1:2], hsv[:, 2:3]
    _c = hsv_l * hsv_s
    _x = _c * (- torch.abs(hsv_h * 6.0 % 2.0 - 1) + 1.0)
    _m = hsv_l - _c
    _o = torch.zeros_like(_c)
    idx = (hsv_h * 6.0).type(torch.uint8)
    idx = (idx % 6).expand(-1, 3, -1, -1)
    rgb = torch.empty_like(hsv)
    rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]
    rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]
    rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]
    rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]
    rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]
    rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]
    rgb += _m
    return rgb


NODE_CLASS_MAPPINGS = {
    "SwarmImageScaleForMP": SwarmImageScaleForMP,
    "SwarmImageCrop": SwarmImageCrop,
    "SwarmVideoBoomerang": SwarmVideoBoomerang,
    "SwarmImageNoise": SwarmImageNoise,
    "SwarmTrimFrames": SwarmTrimFrames,
    "SwarmCountFrames": SwarmCountFrames,
    "SwarmImageWidth": SwarmImageWidth,
    "SwarmImageHeight": SwarmImageHeight,
    "SwarmImageCompositeMaskedColorCorrecting": SwarmImageCompositeMaskedColorCorrecting
}
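The color-correcting composite above hinges on the ordinary least squares fit inside linear_fit: dest is modeled as m * source + b using only pixels that are fully outside the mask. A toy, self-contained sketch (not part of the commit; the sample values are invented) of the same means-and-deviations formula:

import torch

source = torch.tensor([0.2, 0.4, 0.6, 0.8])
dest = 1.5 * source + 0.1  # pretend the inpainted region came back brighter with an offset

# Same regression used per HSV channel by linear_fit: slope from covariance over variance,
# intercept from the means.
m = ((source - source.mean()) * (dest - dest.mean())).sum() / ((source - source.mean()) ** 2).sum()
b = dest.mean() - m * source.mean()
print(m.item(), b.item())  # approximately 1.5 and 0.1, recovering the simulated color shift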
SwarmInputNodes.py ADDED
@@ -0,0 +1,299 @@
from . import SwarmLoadImageB64
import folder_paths
from nodes import CheckpointLoaderSimple, LoadImage
from comfy_extras.nodes_video import LoadVideo
from comfy_api.input_impl import VideoFromFile
import os, base64, io
try:
    from comfy_extras.nodes_audio import LoadAudio
    from comfy_extras.nodes_audio import load as raw_audio_load
except ImportError:
    print("Error: Nodes_Audio failed to import")

INT_MAX = 0xffffffffffffffff
INT_MIN = -INT_MAX

class SwarmInputGroup:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Group", "tooltip": "The title of the group."}),
                "open_by_default": ("BOOLEAN", {"default": True, "tooltip": "Whether the group should be open by default."}),
                "description": ("STRING", {"default": "", "multiline": True, "tooltip": "A description of the group that shows up when you click the '?' button."}),
                "order_priority": ("FLOAT", {"default": 0, "min": -1024, "max": 1024, "step": 0.5, "round": 0.0000001, "tooltip": "The order priority of the group. Higher values go further down in the list of groups."}),
                "is_advanced": ("BOOLEAN", {"default": False, "tooltip": "If true, the group will only be visible when 'Display Advanced' is clicked."}),
                "can_shrink": ("BOOLEAN", {"default": True, "tooltip": "If true, the group can be collapsed by the user. If false, will be forced to remain open."}),
            },
        }

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("GROUP",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Group defines a parameter grouping - link this to other nodes to create a collapsible group, all nodes this links to will be inside the group."

    def do_input(self, **kwargs):
        return (None, )


STANDARD_REQ_INPUTS = {
    "description": ("STRING", {"default": "", "multiline": True, "tooltip": "A description of the input that shows up when you click the '?' button."}),
    "order_priority": ("FLOAT", {"default": 0, "min": -1024, "max": 1024, "step": 0.5, "round": 0.0000001, "tooltip": "The order priority of the input. Higher values go further down in the list of inputs. This only applies within the group this node is part of."}),
    "is_advanced": ("BOOLEAN", {"default": False, "tooltip": "If true, the input will only be visible when 'Display Advanced' is clicked."}),
    "raw_id": ("STRING", {"default": "", "tooltip": "The raw ID of the input. This can be used to customize the input for API usage, or to make use of default SwarmUI parameters. Most of the time, you don't need to touch this. By default this will autogenerate a unique ID based on the title value."}),
}
STANDARD_OTHER_INPUTS = {
    "optional": {
        "group": ("GROUP", )
    }
}


class SwarmInputInteger:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Integer", "tooltip": "The name of the input."}),
                "value": ("INT", {"default": 0, "min": INT_MIN, "max": INT_MAX, "step": 1, "tooltip": "The default value of the input."}),
                "step": ("INT", {"default": 1, "min": INT_MIN, "max": INT_MAX, "step": 1, "tooltip": "The step size of the input. That is, how much the value changes when you click the up/down arrows or move the slider."}),
                "min": ("INT", {"default": 0, "min": INT_MIN, "max": INT_MAX, "step": 1, "tooltip": "The minimum value of the input."}),
                "max": ("INT", {"default": 100, "min": INT_MIN, "max": INT_MAX, "step": 1, "tooltip": "The maximum value of the input."}),
                "view_max": ("INT", {"default": 100, "min": INT_MIN, "max": INT_MAX, "step": 1, "tooltip": "The maximum value of the input that is displayed in the UI when using a slider. This is useful if you want to allow a higher range of values, but don't want to clutter the UI with a huge slider."}),
                "view_type": (["big", "small", "seed", "slider", "pot_slider"], {"tooltip": "The type of input control to use. 'big' is a large text input, 'small' is a small text input, 'seed' is a text input with seed-specific controls, 'slider' is a slider, and 'pot_slider' is a Power-Of-Two scaled slider - this is useful for large inputs like resolutions to allow a more natural feeling selection range."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("INT",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Integer lets you input a whole number without a decimal point."

    def do_input(self, value, **kwargs):
        return (value, )


class SwarmInputFloat:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Floating-Point Number", "tooltip": "The name of the input."}),
                "value": ("FLOAT", {"default": 0, "min": INT_MIN, "max": INT_MAX, "step": 0.01, "round": 0.0000001, "tooltip": "The default value of the input."}),
                "step": ("FLOAT", {"default": 0.1, "min": INT_MIN, "max": INT_MAX, "step": 0.01, "round": 0.0000001, "tooltip": "The step size of the input. That is, how much the value changes when you click the up/down arrows or move the slider."}),
                "min": ("FLOAT", {"default": 0, "min": INT_MIN, "max": INT_MAX, "step": 0.01, "round": 0.0000001, "tooltip": "The minimum value of the input."}),
                "max": ("FLOAT", {"default": 100, "min": INT_MIN, "max": INT_MAX, "step": 0.01, "round": 0.0000001, "tooltip": "The maximum value of the input."}),
                "view_max": ("FLOAT", {"default": 100, "min": INT_MIN, "max": INT_MAX, "step": 0.01, "round": 0.0000001, "tooltip": "The maximum value of the input that is displayed in the UI when using a slider. This is useful if you want to allow a higher range of values, but don't want to clutter the UI with a huge slider."}),
                "view_type": (["big", "small", "slider", "pot_slider"], {"tooltip": "The type of input control to use. 'big' is a large text input, 'small' is a small text input, 'slider' is a slider, and 'pot_slider' is a Power-Of-Two scaled slider - this is useful for large inputs like resolutions to allow a more natural feeling selection range."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Float lets you input a number with a decimal point."

    def do_input(self, value, **kwargs):
        return (value, )


class SwarmInputText:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Text", "tooltip": "The name of the input."}),
                "value": ("STRING", {"default": "", "multiline": True, "tooltip": "The default value of the input."}),
                "view_type": (["normal", "prompt", "big"], {"tooltip": "How to format this text input. 'normal' is a simple single line text input, 'prompt' is a prompt-like text input that has multiple lines and other prompting-specific features, 'big' is an extra large multiline text box."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("STRING",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Text lets you input a string of text. This can be simple text inputs, or prompt-like text inputs."

    def do_input(self, value, **kwargs):
        return (value, )


class SwarmInputModelName:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Model Name Input", "tooltip": "The name of the input."}),
                "value": ("STRING", {"default": "", "multiline": False, "tooltip": "The default value of the input."}),
                "subtype": (["Stable-Diffusion", "VAE", "LoRA", "Embedding", "ControlNet", "ClipVision"], {"tooltip": "The model subtype to select from."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Model Name lets you have a dropdown select for models of a given model sub-type."

    def do_input(self, value, **kwargs):
        return (value, )


class SwarmInputCheckpoint:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Checkpoint Model Name Input", "tooltip": "The name of the input."}),
                "value": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The default value of the input."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Checkpoint lets you have a dropdown select for checkpoint models. Acts like the Checkpoint Loader node."

    def do_input(self, value, **kwargs):
        return CheckpointLoaderSimple().load_checkpoint(value)


class SwarmInputDropdown:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Dropdown", "tooltip": "The name of the input."}),
                "value": ("STRING", {"default": "", "multiline": False, "tooltip": "The default value of the input."}),
                "values": ("STRING", {"default": "one, two, three", "multiline": True, "tooltip": "A comma-separated list of values to choose from. If you leave this blank, the dropdown will automatically load the value list from the connected node."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("STRING", "",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Dropdown lets you have a dropdown select for a list of values. You can leave the values list empty and attach the blank output to a real dropdown in another node to have it automatically load the value list from that connected node."

    def do_input(self, value, **kwargs):
        return (value, value, )


class SwarmInputBoolean:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "title": ("STRING", {"default": "My Boolean", "tooltip": "The name of the input."}),
                "value": ("BOOLEAN", {"default": False, "tooltip": "The default value of the input."}),
            } | STANDARD_REQ_INPUTS,
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("BOOLEAN",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Boolean lets you have a checkbox for a true/false value."

    def do_input(self, value, **kwargs):
        return (value, )


class SwarmInputImage:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        files = folder_paths.filter_files_content_types(files, ["image"])
        return {
            "required": {
                "title": ("STRING", {"default": "My Image", "tooltip": "The name of the input."}),
                "value": ("STRING", {"default": "(Do Not Set Me)", "multiline": True, "tooltip": "Always leave this blank, the SwarmUI server will fill it for you."}),
                "auto_resize": ("BOOLEAN", {"default": True, "tooltip": "If true, the image will be resized to match the current generation resolution. If false, the image will be kept at whatever size the user input it at."}),
            } | STANDARD_REQ_INPUTS | {
                "image": (sorted(files), {"image_upload": True}),
            },
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("IMAGE","MASK",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Image lets you input an image. Internally this node uses a Base64 string as input when value is set by the SwarmUI server (Generate tab), otherwise it uses the selected Image (Comfy Workflow tab)."

    def do_input(self, value=None, image=None, **kwargs):
        if not value or value == "(Do Not Set Me)":
            return LoadImage().load_image(image)
        else:
            return SwarmLoadImageB64.b64_to_img_and_mask(value)


class SwarmInputAudio:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        files = folder_paths.filter_files_content_types(files, ["audio"])
        return {
            "required": {
                "title": ("STRING", {"default": "My Audio", "tooltip": "The name of the input."}),
                "value": ("STRING", {"default": "(Do Not Set Me)", "multiline": True, "tooltip": "Always leave this blank, the SwarmUI server will fill it for you."}),
            } | STANDARD_REQ_INPUTS | {
                # TODO: This explodes the comfy frontend for some reason
                #"audio": (sorted(files), {"audio_upload": True}),
            },
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("AUDIO",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Audio lets you input an audio file. Internally this node uses a Base64 string as input when value is set by the SwarmUI server (Generate tab), otherwise it uses the selected Audio (Comfy Workflow tab)."

    def do_input(self, value=None, audio=None, **kwargs):
        if not value or value == "(Do Not Set Me)":
            return LoadAudio().load_audio(audio)
        else:
            audio_data = base64.b64decode(value)
            audio_bytes = io.BytesIO(audio_data)
            waveform, sample_rate = raw_audio_load(audio_bytes)
            audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
            return (audio, )


class SwarmInputVideo:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        files = folder_paths.filter_files_content_types(files, ["video"])
        return {
            "required": {
                "title": ("STRING", {"default": "My Video", "tooltip": "The name of the input."}),
                "value": ("STRING", {"default": "(Do Not Set Me)", "multiline": True, "tooltip": "Always leave this blank, the SwarmUI server will fill it for you."}),
            } | STANDARD_REQ_INPUTS | {
                "video": (sorted(files), {"video_upload": True}),
            },
        } | STANDARD_OTHER_INPUTS

    CATEGORY = "SwarmUI/inputs"
    RETURN_TYPES = ("VIDEO",)
    FUNCTION = "do_input"
    DESCRIPTION = "SwarmInput nodes let you define custom input controls in Swarm-Comfy Workflows. Video lets you input a video file. Internally this node uses a Base64 string as input when value is set by the SwarmUI server (Generate tab), otherwise it uses the selected Video (Comfy Workflow tab)."

    def do_input(self, value=None, video=None, **kwargs):
        if not value or value == "(Do Not Set Me)":
            return LoadVideo.execute(video)
        else:
            video_data = base64.b64decode(value)
            video_bytes = io.BytesIO(video_data)
            return (VideoFromFile(video_bytes), )


NODE_CLASS_MAPPINGS = {
    "SwarmInputGroup": SwarmInputGroup,
    "SwarmInputInteger": SwarmInputInteger,
    "SwarmInputFloat": SwarmInputFloat,
    "SwarmInputText": SwarmInputText,
    "SwarmInputModelName": SwarmInputModelName,
    "SwarmInputCheckpoint": SwarmInputCheckpoint,
    "SwarmInputDropdown": SwarmInputDropdown,
    "SwarmInputBoolean": SwarmInputBoolean,
    "SwarmInputImage": SwarmInputImage,
    "SwarmInputAudio": SwarmInputAudio,
    "SwarmInputVideo": SwarmInputVideo,
}
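Every INPUT_TYPES above composes its node-specific fields with the shared dictionaries via the dict-union operator. A minimal standalone sketch of that pattern (not part of the commit; the field names are illustrative placeholders, and it assumes Python 3.9+ where dict | dict is available):

# Node-specific required fields are merged with the shared STANDARD_REQ_INPUTS, then the whole
# INPUT_TYPES dict is merged with STANDARD_OTHER_INPUTS to pick up the optional "group" slot.
node_specific = {"title": ("STRING", {"default": "My Integer"})}
shared = {"is_advanced": ("BOOLEAN", {"default": False})}
optional = {"optional": {"group": ("GROUP", )}}

merged = {"required": node_specific | shared} | optional
print(sorted(merged["required"].keys()))  # ['is_advanced', 'title']
print("optional" in merged)               # True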
SwarmInternalUtil.py ADDED
@@ -0,0 +1,111 @@
import comfy, folder_paths, execution
from server import PromptServer
from comfy import samplers
import functools

# This is purely a hack to provide a list of embeds in the object_info report.
# Code referenced from Comfy VAE impl. Probably does nothing useful in an actual workflow.
class SwarmEmbedLoaderListProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "embed_name": (folder_paths.get_filename_list("embeddings"), )
            }
        }

    CATEGORY = "SwarmUI/internal"
    RETURN_TYPES = ("EMBEDDING",)
    FUNCTION = "load_embed"
    DESCRIPTION = "Internal node just intended to provide a list of currently known embeddings to Swarm. You can also use it to blindly load an embedding file if you need to."

    def load_embed(self, embed_name):
        embed_path = folder_paths.get_full_path("embedding", embed_name)
        sd = comfy.utils.load_torch_file(embed_path)
        return (sd,)


class SwarmJustLoadTheModelPlease:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "clip": ("CLIP,GEMMA",),
                "vae": ("VAE",),
            }
        }

    CATEGORY = "SwarmUI/internal"
    RETURN_TYPES = ()
    FUNCTION = "just_load"
    OUTPUT_NODE = True
    DESCRIPTION = "Internal node that acts as a final output for a model/clip/vae. This allows swarm to load models when needed without generating anything."

    def just_load(self, model, clip, vae):
        if model is None:
            raise ValueError("The model failed to load")
        if clip is None:
            raise ValueError("The text encoders (CLIP) failed to load")
        if vae is None:
            raise ValueError("The VAE failed to load")
        return {}


NODE_CLASS_MAPPINGS = {
    "SwarmEmbedLoaderListProvider": SwarmEmbedLoaderListProvider,
    "SwarmJustLoadTheModelPlease": SwarmJustLoadTheModelPlease
}


# This is a dirty hack to shut up the errors from Dropdown combo mismatch, pending Comfy upstream fix
ORIG_EXECUTION_VALIDATE = execution.validate_inputs
async def validate_inputs(prompt_id, prompt, item, validated):
    raw_result = await ORIG_EXECUTION_VALIDATE(prompt_id, prompt, item, validated)
    if raw_result is None:
        return None
    (did_succeed, errors, unique_id) = raw_result
    if did_succeed:
        return raw_result
    for error in errors:
        if error['type'] == "return_type_mismatch":
            o_id = error['extra_info']['linked_node'][0]
            o_class_type = prompt[o_id]['class_type']
            if o_class_type == "SwarmInputModelName" or o_class_type == "SwarmInputDropdown":
                errors.remove(error)
    did_succeed = len(errors) == 0
    return (did_succeed, errors, unique_id)

execution.validate_inputs = validate_inputs

# Comfy's app logger has broken terminal compat, so violently force it to auto-flush
try:
    from app import logger
    def patch_interceptor(interceptor):
        if interceptor:
            orig = interceptor.write
            def write(self, data):
                orig(data)
                self.flush()
            interceptor.write = functools.partial(write, interceptor)
            # Force UTF-8 too, to prevent encoding errors (Comfy will full crash outputting some languages)
            # (Swarm's C# engine has code to forcibly assume UTF-8, so this is safe. Otherwise it would wonk the terminal if the terminal isn't set to UTF-8)
            interceptor.reconfigure(encoding='utf-8')
    patch_interceptor(logger.stdout_interceptor)
    patch_interceptor(logger.stderr_interceptor)
except Exception as e:
    import traceback
    traceback.print_exc()

# comfy's server/PromptServer is janky with EventID=4, so overwrite send_bytes to interpret EventID=9999123 as 4
try:
    server = PromptServer.instance
    orig = server.send_bytes
    async def send_bytes(self, event, data, sid=None):
        if event == 9999123:
            event = 4
        await orig(event, data, sid=sid)
    server.send_bytes = functools.partial(send_bytes, server)
except Exception as e:
    import traceback
    traceback.print_exc()
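Both patches above use the same wrap-and-rebind pattern: keep a reference to the original bound method, define a wrapper that calls it, and rebind with functools.partial so the instance is passed back in as self. A minimal isolated sketch of that pattern on a dummy class (illustration only, not the Comfy objects):

import functools

class Writer:
    def write(self, data):
        print("wrote:", data)

w = Writer()
orig = w.write  # original bound method, still callable after rebinding
def write(self, data):
    orig(data)        # preserve the original behavior
    print("flushed")  # extra behavior, like the auto-flush patch above
w.write = functools.partial(write, w)  # instance attribute shadows the class method

w.write("hello")  # prints "wrote: hello" then "flushed"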
SwarmKSampler.py ADDED
@@ -0,0 +1,356 @@
import torch, struct, json
from io import BytesIO
import latent_preview, comfy
from server import PromptServer
from comfy.model_base import SDXL, SVD_img2vid, Flux, WAN21, Chroma
from comfy import samplers
import numpy as np
from math import ceil
from latent_preview import TAESDPreviewerImpl
from comfy_execution.utils import get_executing_context

def slerp(val, low, high):
    low_norm = low / torch.norm(low, dim=1, keepdim=True)
    high_norm = high / torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm * high_norm).sum(1)
    if dot.mean() > 0.9995:
        return low * val + high * (1 - val)
    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
    return res

def swarm_partial_noise(seed, latent_image):
    generator = torch.manual_seed(seed)
    return torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")

def swarm_fixed_noise(seed, latent_image, var_seed, var_seed_strength):
    noises = []
    for i in range(latent_image.size()[0]):
        if var_seed_strength > 0:
            noise = swarm_partial_noise(seed, latent_image[i])
            var_noise = swarm_partial_noise(var_seed + i, latent_image[i])
            if noise.ndim == 4: # Video models are B C F H W, we're in a B loop already so sub-iterate over F (Frames)
                for j in range(noise.shape[1]):
                    noise[:, j] = slerp(var_seed_strength, noise[:, j], var_noise[:, j])
            else:
                noise = slerp(var_seed_strength, noise, var_noise)
        else:
            noise = swarm_partial_noise(seed + i, latent_image[i])
        noises.append(noise)
    return torch.stack(noises, dim=0)

def get_preview_metadata():
    executing_context = get_executing_context()
    prompt_id = None
    node_id = None
    if executing_context is not None:
        prompt_id = executing_context.prompt_id
        node_id = executing_context.node_id
    if prompt_id is None:
        prompt_id = PromptServer.instance.last_prompt_id
    if node_id is None:
        node_id = PromptServer.instance.last_node_id
    return {"node_id": node_id, "prompt_id": prompt_id, "display_node_id": node_id, "parent_node_id": node_id, "real_node_id": node_id} # display_node_id, parent_node_id, real_node_id? comfy_execution/progress.py has this.

def swarm_send_extra_preview(id, image):
    server = PromptServer.instance
    metadata = get_preview_metadata()
    metadata["mime_type"] = "image/jpeg"
    metadata["id"] = id
    metadata_json = json.dumps(metadata).encode('utf-8')
    bytesIO = BytesIO()
    image.save(bytesIO, format="JPEG", quality=90, compress_level=4)
    image_bytes = bytesIO.getvalue()
    combined_data = bytearray()
    combined_data.extend(struct.pack(">I", len(metadata_json)))
    combined_data.extend(metadata_json)
    combined_data.extend(image_bytes)
    server.send_sync(9999123, combined_data, sid=server.client_id)

def swarm_send_animated_preview(id, images):
    server = PromptServer.instance
    bytesIO = BytesIO()
    images[0].save(bytesIO, save_all=True, duration=int(1000.0/6), append_images=images[1 : len(images)], lossless=False, quality=60, method=0, format='WEBP')
    bytesIO.seek(0)
    image_bytes = bytesIO.getvalue()
    metadata = get_preview_metadata()
    metadata["mime_type"] = "image/webp"
    metadata["id"] = id
    metadata_json = json.dumps(metadata).encode('utf-8')
    combined_data = bytearray()
    combined_data.extend(struct.pack(">I", len(metadata_json)))
    combined_data.extend(metadata_json)
    combined_data.extend(image_bytes)
    server.send_sync(9999123, combined_data, sid=server.client_id)

def calculate_sigmas_scheduler(model, scheduler_name, steps, sigma_min, sigma_max, rho):
    model_sampling = model.get_model_object("model_sampling")
    if scheduler_name == "karras":
        return comfy.k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min if sigma_min >= 0 else float(model_sampling.sigma_min), sigma_max=sigma_max if sigma_max >= 0 else float(model_sampling.sigma_max), rho=rho)
    elif scheduler_name == "exponential":
        return comfy.k_diffusion.sampling.get_sigmas_exponential(n=steps, sigma_min=sigma_min if sigma_min >= 0 else float(model_sampling.sigma_min), sigma_max=sigma_max if sigma_max >= 0 else float(model_sampling.sigma_max))
    else:
        return None

def make_swarm_sampler_callback(steps, device, model, previews):
    previewer = latent_preview.get_previewer(device, model.model.latent_format) if previews != "none" else None
    pbar = comfy.utils.ProgressBar(steps)
    def callback(step, x0, x, total_steps):
        pbar.update_absolute(step + 1, total_steps, None)
        if previewer:
            if (step == 0 or (step < 3 and x0.ndim == 5 and x0.shape[1] > 8)) and not isinstance(previewer, TAESDPreviewerImpl):
                x0 = x0.clone().cpu() # Sync copy to CPU for first few steps to prevent reading old data, more steps for videos. Future steps allow comfy to do its async non_blocky stuff.
            if x0.ndim == 5:
                # video shape is [batch, channels, backwards time, width, height], for previews needs to be swapped to [forwards time, channels, width, height]
                x0 = x0[0].permute(1, 0, 2, 3)
                x0 = torch.flip(x0, [0])
            def do_preview(id, index):
                preview_img = previewer.decode_latent_to_preview_image("JPEG", x0[index:index+1])
                swarm_send_extra_preview(id, preview_img[1])
            if previews == "iterate":
                do_preview(0, step % x0.shape[0])
            elif previews == "animate":
                if x0.shape[0] == 1:
                    do_preview(0, 0)
                else:
                    images = []
                    for i in range(x0.shape[0]):
                        preview_img = previewer.decode_latent_to_preview_image("JPEG", x0[i:i+1])
                        images.append(preview_img[1])
                    swarm_send_animated_preview(0, images)
            elif previews == "default":
                for i in range(x0.shape[0]):
                    preview_img = previewer.decode_latent_to_preview_image("JPEG", x0[i:i+1])
                    swarm_send_extra_preview(i, preview_img[1])
            elif previews == "one":
                do_preview(0, 0)
            elif previews == "second":
                do_preview(0, 1 % x0.shape[0])
    return callback


def loglinear_interp(t_steps, num_steps):
    """
    Performs log-linear interpolation of a given array of decreasing numbers.
    """
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])

    new_xs = np.linspace(0, 1, num_steps)
    new_ys = np.interp(new_xs, xs, ys)

    interped_ys = np.exp(new_ys)[::-1].copy()
    return interped_ys

AYS_NOISE_LEVELS = {
    "SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
    "SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
    "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002],
    # Flux and Wan from https://github.com/comfyanonymous/ComfyUI/pull/7584
    "Flux": [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001],
    "Wan": [1.0, 0.997, 0.995, 0.993, 0.991, 0.989, 0.987, 0.985, 0.98, 0.975, 0.973, 0.968, 0.96, 0.946, 0.927, 0.902, 0.864, 0.776, 0.539, 0.208, 0.001],
    # https://github.com/comfyanonymous/ComfyUI/commit/08ff5fa08a92e0b3f23b9abec979a830a6cffb03#diff-3e4e70e402dcd9e1070ad71ef9292277f10d9faccf36a1c405c0c717a7ee6485R23
    "Chroma": [0.992, 0.99, 0.988, 0.985, 0.982, 0.978, 0.973, 0.968, 0.961, 0.953, 0.943, 0.931, 0.917, 0.9, 0.881, 0.858, 0.832, 0.802, 0.769, 0.731, 0.69, 0.646, 0.599, 0.55, 0.501, 0.451, 0.402, 0.355, 0.311, 0.27, 0.232, 0.199, 0.169, 0.143, 0.12, 0.101, 0.084, 0.07, 0.058, 0.048, 0.001]
}

def split_latent_tensor(latent_tensor, tile_size=1024, scale_factor=8):
    """Generate tiles for a given latent tensor, considering the scaling factor."""
    latent_tile_size = tile_size // scale_factor # Adjust tile size for latent space
    height, width = latent_tensor.shape[-2:]

    # Determine the number of tiles needed
    num_tiles_x = ceil(width / latent_tile_size)
    num_tiles_y = ceil(height / latent_tile_size)

    # If width or height is an exact multiple of the tile size, add an additional tile for overlap
    if width % latent_tile_size == 0:
        num_tiles_x += 1
    if height % latent_tile_size == 0:
        num_tiles_y += 1

    # Calculate the overlap
    overlap_x = 0 if num_tiles_x == 1 else (num_tiles_x * latent_tile_size - width) / (num_tiles_x - 1)
    overlap_y = 0 if num_tiles_y == 1 else (num_tiles_y * latent_tile_size - height) / (num_tiles_y - 1)
    if overlap_x < 32 and num_tiles_x > 1:
        num_tiles_x += 1
        overlap_x = (num_tiles_x * latent_tile_size - width) / (num_tiles_x - 1)
    if overlap_y < 32 and num_tiles_y > 1:
        num_tiles_y += 1
180
+ overlap_y = (num_tiles_y * latent_tile_size - height) / (num_tiles_y - 1)
181
+
182
+ tiles = []
183
+
184
+ for i in range(num_tiles_y):
185
+ for j in range(num_tiles_x):
186
+ x_start = j * latent_tile_size - j * overlap_x
187
+ y_start = i * latent_tile_size - i * overlap_y
188
+
189
+ # Correct for potential float precision issues
190
+ x_start = round(x_start)
191
+ y_start = round(y_start)
192
+
193
+ # Crop the tile from the latent tensor
194
+ tile_tensor = latent_tensor[..., y_start:y_start + latent_tile_size, x_start:x_start + latent_tile_size]
195
+ tiles.append(((x_start, y_start, x_start + latent_tile_size, y_start + latent_tile_size), tile_tensor))
196
+
197
+ return tiles
198
+
199
+ def stitch_latent_tensors(original_size, tiles, scale_factor=8):
200
+ """Stitch tiles together to create the final upscaled latent tensor with overlaps."""
201
+ result = torch.zeros(original_size)
202
+
203
+ # We assume tiles come in the format [(coordinates, tile), ...]
204
+ sorted_tiles = sorted(tiles, key=lambda x: (x[0][1], x[0][0])) # Sort by upper then left
205
+
206
+ # Variables to keep track of the current row's starting point
207
+ current_row_upper = None
208
+
209
+ for (left, upper, right, lower), tile in sorted_tiles:
210
+
211
+ # Check if we're starting a new row
212
+ if current_row_upper != upper:
213
+ current_row_upper = upper
214
+ first_tile_in_row = True
215
+ else:
216
+ first_tile_in_row = False
217
+
218
+ tile_width = right - left
219
+ tile_height = lower - upper
220
+ feather = tile_width // 8 # Assuming feather size is consistent with the example
221
+
222
+ mask = torch.ones_like(tile)
223
+
224
+ if not first_tile_in_row: # Left feathering for tiles other than the first in the row
225
+ for t in range(feather):
226
+ mask[..., :, t:t+1] *= (1.0 / feather) * (t + 1)
227
+
228
+ if upper != 0: # Top feathering for all tiles except the first row
229
+ for t in range(feather):
230
+ mask[..., t:t+1, :] *= (1.0 / feather) * (t + 1)
231
+
232
+ # Apply the feathering mask
233
+ combined_area = tile * mask + result[..., upper:lower, left:right] * (1.0 - mask)
234
+ result[..., upper:lower, left:right] = combined_area
235
+
236
+ return result
237
+
238
+ class SwarmKSampler:
239
+ @classmethod
240
+ def INPUT_TYPES(s):
241
+ return {
242
+ "required": {
243
+ "model": ("MODEL",),
244
+ "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
245
+ "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
246
+ "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.5, "round": 0.001}),
247
+ "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
248
+ "scheduler": (["turbo", "align_your_steps", "ltxv", "ltxv-image"] + comfy.samplers.KSampler.SCHEDULERS, ),
249
+ "positive": ("CONDITIONING", ),
250
+ "negative": ("CONDITIONING", ),
251
+ "latent_image": ("LATENT", ),
252
+ "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
253
+ "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
254
+ "var_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
255
+ "var_seed_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05, "round": 0.001}),
256
+ "sigma_max": ("FLOAT", {"default": -1, "min": -1.0, "max": 1000.0, "step":0.01, "round": False}),
257
+ "sigma_min": ("FLOAT", {"default": -1, "min": -1.0, "max": 1000.0, "step":0.01, "round": False}),
258
+ "rho": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
259
+ "add_noise": (["enable", "disable"], ),
260
+ "return_with_leftover_noise": (["disable", "enable"], ),
261
+ "previews": (["default", "none", "one", "second", "iterate", "animate"], ),
262
+ "tile_sample": ("BOOLEAN", {"default": False}),
263
+ "tile_size": ("INT", {"default": 1024, "min": 256, "max": 4096}),
264
+ }
265
+ }
266
+
267
+ CATEGORY = "SwarmUI/sampling"
268
+ RETURN_TYPES = ("LATENT",)
269
+ FUNCTION = "run_sampling"
270
+ DESCRIPTION = "Works like a vanilla Comfy KSamplerAdvanced, but with extra inputs for advanced features such as sigma scale, tiling, previews, etc."
271
+
272
+ def sample(self, model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, var_seed, var_seed_strength, sigma_max, sigma_min, rho, add_noise, return_with_leftover_noise, previews):
273
+ device = comfy.model_management.get_torch_device()
274
+ latent_samples = latent_image["samples"]
275
+ latent_samples = comfy.sample.fix_empty_latent_channels(model, latent_samples)
276
+ disable_noise = add_noise == "disable"
277
+
278
+ if disable_noise:
279
+ noise = torch.zeros(latent_samples.size(), dtype=latent_samples.dtype, layout=latent_samples.layout, device="cpu")
280
+ else:
281
+ noise = swarm_fixed_noise(noise_seed, latent_samples, var_seed, var_seed_strength)
282
+
283
+ noise_mask = None
284
+ if "noise_mask" in latent_image:
285
+ noise_mask = latent_image["noise_mask"]
286
+
287
+ sigmas = None
288
+ if scheduler == "turbo":
289
+ timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps]
290
+ sigmas = model.model.model_sampling.sigma(timesteps)
291
+ sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
292
+ elif scheduler == "ltxv" or scheduler == "ltxv-image":
293
+ from comfy_extras.nodes_lt import LTXVScheduler
294
+ sigmas = LTXVScheduler().get_sigmas(steps, 2.05, 0.95, True, 0.1, latent_image if scheduler == "ltxv-image" else None)[0]
295
+ elif scheduler == "align_your_steps":
296
+ if isinstance(model.model, SDXL):
297
+ model_type = "SDXL"
298
+ elif isinstance(model.model, SVD_img2vid):
299
+ model_type = "SVD"
300
+ elif isinstance(model.model, Flux):
301
+ model_type = "Flux"
302
+ elif isinstance(model.model, WAN21):
303
+ model_type = "Wan"
304
+ elif isinstance(model.model, Chroma):
305
+ model_type = "Chroma"
306
+ else:
307
+ print(f"AlignYourSteps: Unknown model type: {type(model.model)}, defaulting to SD1")
308
+ model_type = "SD1"
309
+ sigmas = AYS_NOISE_LEVELS[model_type][:]
310
+ if (steps + 1) != len(sigmas):
311
+ sigmas = loglinear_interp(sigmas, steps + 1)
312
+ sigmas[-1] = 0
313
+ sigmas = torch.FloatTensor(sigmas)
314
+ elif sigma_min >= 0 and sigma_max >= 0 and scheduler in ["karras", "exponential"]:
315
+ if sampler_name in ['dpm_2', 'dpm_2_ancestral']:
316
+ sigmas = calculate_sigmas_scheduler(model, scheduler, steps + 1, sigma_min, sigma_max, rho)
317
+ sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
318
+ else:
319
+ sigmas = calculate_sigmas_scheduler(model, scheduler, steps, sigma_min, sigma_max, rho)
320
+ sigmas = sigmas.to(device)
321
+
322
+ out = latent_image.copy()
323
+ if steps > 0:
324
+ callback = make_swarm_sampler_callback(steps, device, model, previews)
325
+
326
+ samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_samples,
327
+ denoise=1.0, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step,
328
+ force_full_denoise=return_with_leftover_noise == "disable", noise_mask=noise_mask, sigmas=sigmas, callback=callback, seed=noise_seed)
329
+ out["samples"] = samples
330
+ return (out, )
331
+
332
+ # tiled sample version of sample function
333
+ def tiled_sample(self, model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, var_seed, var_seed_strength, sigma_max, sigma_min, rho, add_noise, return_with_leftover_noise, previews, tile_size):
334
+ out = latent_image.copy()
335
+ # split image into tiles
336
+ latent_samples = latent_image["samples"]
337
+ tiles = split_latent_tensor(latent_samples, tile_size=tile_size)
338
+ # resample each tile using self.sample
339
+ resampled_tiles = []
340
+ for coords, tile in tiles:
341
+ resampled_tile = self.sample(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, {"samples": tile}, start_at_step, end_at_step, var_seed, var_seed_strength, sigma_max, sigma_min, rho, add_noise, return_with_leftover_noise, previews)
342
+ resampled_tiles.append((coords, resampled_tile[0]["samples"]))
343
+ # stitch the tiles to get the final upscaled image
344
+ result = stitch_latent_tensors(latent_samples.shape, resampled_tiles)
345
+ out["samples"] = result
346
+ return (out,)
347
+
348
+ def run_sampling(self, model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, var_seed, var_seed_strength, sigma_max, sigma_min, rho, add_noise, return_with_leftover_noise, previews, tile_sample, tile_size):
349
+ if tile_sample:
350
+ return self.tiled_sample(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, var_seed, var_seed_strength, sigma_max, sigma_min, rho, add_noise, return_with_leftover_noise, previews, tile_size)
351
+ else:
352
+ return self.sample(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, var_seed, var_seed_strength, sigma_max, sigma_min, rho, add_noise, return_with_leftover_noise, previews)
353
+
354
+ NODE_CLASS_MAPPINGS = {
355
+ "SwarmKSampler": SwarmKSampler,
356
+ }
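For reference, a minimal standalone sketch of how the align_your_steps branch above resamples a preset noise-level table to an arbitrary step count. The interpolation function and the SDXL levels are copied from this file; the 20-step count is only an illustrative value.

import numpy as np

def loglinear_interp(t_steps, num_steps):
    # same as the helper above: interpolate in log space over the reversed (increasing) array
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])
    new_ys = np.interp(np.linspace(0, 1, num_steps), xs, ys)
    return np.exp(new_ys)[::-1].copy()

sdxl_levels = [14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945,
               0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582]
steps = 20
sigmas = loglinear_interp(sdxl_levels, steps + 1)  # 21 strictly decreasing sigma values
sigmas[-1] = 0  # the node then zeroes the final sigma and wraps the array in a torch.FloatTensor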
SwarmLatents.py ADDED
@@ -0,0 +1,37 @@
1
+ import comfy, torch
2
+
3
+ class SwarmOffsetEmptyLatentImage:
4
+ def __init__(self):
5
+ self.device = comfy.model_management.intermediate_device()
6
+
7
+ @classmethod
8
+ def INPUT_TYPES(s):
9
+ return {
10
+ "required": {
11
+ "width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 8}),
12
+ "height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 8}),
13
+ "off_a": ("FLOAT", {"default": 0, "min": -10, "max": 10, "step": 0.0001}),
14
+ "off_b": ("FLOAT", {"default": 0, "min": -10, "max": 10, "step": 0.0001}),
15
+ "off_c": ("FLOAT", {"default": 0, "min": -10, "max": 10, "step": 0.0001}),
16
+ "off_d": ("FLOAT", {"default": 0, "min": -10, "max": 10, "step": 0.0001}),
17
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})
18
+ }
19
+ }
20
+
21
+ CATEGORY = "SwarmUI/latents"
22
+ RETURN_TYPES = ("LATENT",)
23
+ FUNCTION = "generate"
24
+ DESCRIPTION = "Generates a latent image with 4 channels, each channel filled with a different offset value. Designed to allow alternate empty value offsets for SDv1 and SDXL."
25
+
26
+ def generate(self, width, height, off_a, off_b, off_c, off_d, batch_size=1):
27
+ latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
28
+ latent[:, 0, :, :] = off_a
29
+ latent[:, 1, :, :] = off_b
30
+ latent[:, 2, :, :] = off_c
31
+ latent[:, 3, :, :] = off_d
32
+ return ({"samples":latent}, )
33
+
34
+
35
+ NODE_CLASS_MAPPINGS = {
36
+ "SwarmOffsetEmptyLatentImage": SwarmOffsetEmptyLatentImage
37
+ }
SwarmLoadImageB64.py ADDED
@@ -0,0 +1,47 @@
1
+ from PIL import Image, ImageOps
2
+ import numpy as np
3
+ import torch, base64, io
4
+
5
+ def b64_to_img_and_mask(image_base64):
6
+ imageData = base64.b64decode(image_base64)
7
+ i = Image.open(io.BytesIO(imageData))
8
+ if hasattr(i, 'is_animated') and i.is_animated:
9
+ images = []
10
+ for frame in range(i.n_frames):
11
+ i.seek(frame)
12
+ images.append(i.convert("RGB"))
13
+ i.seek(0)
14
+ image = np.array(images).astype(np.float32) / 255.0
15
+ image = torch.from_numpy(image)
16
+ else:
17
+ i = ImageOps.exif_transpose(i)
18
+ image = i.convert("RGB")
19
+ image = np.array(image).astype(np.float32) / 255.0
20
+ image = torch.from_numpy(image)[None,]
21
+ if 'A' in i.getbands():
22
+ mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
23
+ mask = 1. - torch.from_numpy(mask)
24
+ else:
25
+ mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
26
+ return (image, mask.unsqueeze(0))
27
+
28
+ class SwarmLoadImageB64:
29
+ @classmethod
30
+ def INPUT_TYPES(s):
31
+ return {
32
+ "required": {
33
+ "image_base64": ("STRING", {"multiline": True})
34
+ }
35
+ }
36
+
37
+ CATEGORY = "SwarmUI/images"
38
+ RETURN_TYPES = ("IMAGE", "MASK")
39
+ FUNCTION = "load_image_b64"
40
+ DESCRIPTION = "Loads an image from a base64 string. Works like a regular LoadImage node, but with an input format designed to be easier to use through automated calls, including SwarmUI with custom workflows."
41
+
42
+ def load_image_b64(self, image_base64):
43
+ return b64_to_img_and_mask(image_base64)
44
+
45
+ NODE_CLASS_MAPPINGS = {
46
+ "SwarmLoadImageB64": SwarmLoadImageB64,
47
+ }
SwarmLoraLoader.py ADDED
@@ -0,0 +1,56 @@
1
+ import comfy
2
+ import folder_paths
3
+
4
+ class SwarmLoraLoader:
5
+ def __init__(self):
6
+ self.loaded_lora = None
7
+
8
+ @classmethod
9
+ def INPUT_TYPES(s):
10
+ return {
11
+ "required": {
12
+ "model": ("MODEL", ),
13
+ "clip": ("CLIP", ),
14
+ "lora_names": ("STRING", {"multiline": True, "tooltip": "Comma separated list of lora names to load."}),
15
+ "lora_weights": ("STRING", {"multiline": True, "tooltip": "Comma separated list of lora weights to apply to each lora. Must match the number of loras."}),
16
+ }
17
+ }
18
+
19
+ CATEGORY = "SwarmUI/models"
20
+ RETURN_TYPES = ("MODEL", "CLIP")
21
+ FUNCTION = "load_loras"
22
+ DESCRIPTION = "Like a regular LoRA Loader, but designed to take a dynamic list of loras and weights, to allow easier integration with SwarmUI custom workflows."
23
+
24
+ def load_loras(self, model, clip, lora_names, lora_weights):
25
+ if lora_names.strip() == "":
26
+ return (model, clip)
27
+
28
+ lora_names = lora_names.split(",")
29
+ lora_weights = lora_weights.split(",")
30
+ lora_weights = [float(x.strip()) for x in lora_weights]
31
+
32
+ for i in range(len(lora_names)):
33
+ lora_name = lora_names[i].strip()
34
+ weight = lora_weights[i]
35
+ if weight == 0:
36
+ continue
37
+ # This section copied directly from default comfy LoraLoader
38
+ lora_path = folder_paths.get_full_path("loras", lora_name)
39
+ lora = None
40
+ if self.loaded_lora is not None:
41
+ if self.loaded_lora[0] == lora_path:
42
+ lora = self.loaded_lora[1]
43
+ else:
44
+ temp = self.loaded_lora
45
+ self.loaded_lora = None
46
+ del temp
47
+ if lora is None:
48
+ lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
49
+ self.loaded_lora = (lora_path, lora)
50
+ model, clip = comfy.sd.load_lora_for_models(model, clip, lora, weight, weight)
51
+
52
+ return (model, clip)
53
+
54
+ NODE_CLASS_MAPPINGS = {
55
+ "SwarmLoraLoader": SwarmLoraLoader,
56
+ }
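An illustrative input pairing for the loader above; the file names are hypothetical, and any LoRAs present in the configured loras folder behave the same way.

# lora_names   = "detail_tweaker.safetensors, pixel_style.safetensors"
# lora_weights = "0.8, 1.0"
# Each name is matched positionally with its weight, and a weight of 0 skips that LoRA entirely.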
SwarmMasks.py ADDED
@@ -0,0 +1,287 @@
1
+ import torch, comfy
2
+
3
+ intermediate_device = comfy.model_management.intermediate_device()
4
+ main_device = comfy.model_management.get_torch_device()
5
+
6
+ class SwarmSquareMaskFromPercent:
7
+ @classmethod
8
+ def INPUT_TYPES(s):
9
+ return {
10
+ "required": {
11
+ "x": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05, "round": 0.0001, "tooltip": "The x position of the mask as a percentage of the image size."}),
12
+ "y": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05, "round": 0.0001, "tooltip": "The y position of the mask as a percentage of the image size."}),
13
+ "width": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05, "round": 0.0001, "tooltip": "The width of the mask as a percentage of the image size."}),
14
+ "height": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05, "round": 0.0001, "tooltip": "The height of the mask as a percentage of the image size."}),
15
+ "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "tooltip": "The strength of the mask, ie the value of all masked pixels, leaving the rest black ie 0."}),
16
+ }
17
+ }
18
+
19
+ CATEGORY = "SwarmUI/masks"
20
+ RETURN_TYPES = ("MASK",)
21
+ FUNCTION = "mask_from_perc"
22
+ DESCRIPTION = "Creates a simple square mask with the specified dimensions and position, with the specified strength (ie value of all masked pixels, leaving the rest black ie 0)."
23
+
24
+ def mask_from_perc(self, x, y, width, height, strength):
25
+ SCALE = 256
26
+ mask = torch.zeros((SCALE, SCALE), dtype=torch.float32, device=intermediate_device)
27
+ mask[int(y*SCALE):int((y+height)*SCALE), int(x*SCALE):int((x+width)*SCALE)] = strength
28
+ return (mask.unsqueeze(0),)
29
+
30
+
31
+ def mask_size_match(mask_a, mask_b):
32
+ if len(mask_a.shape) == 2:
33
+ mask_a = mask_a.unsqueeze(0)
34
+ if len(mask_b.shape) == 2:
35
+ mask_b = mask_b.unsqueeze(0)
36
+ height = max(mask_a.shape[1], mask_b.shape[1])
37
+ width = max(mask_a.shape[2], mask_b.shape[2])
38
+ if mask_a.shape[1] != height or mask_a.shape[2] != width:
39
+ mask_a = torch.nn.functional.interpolate(mask_a.unsqueeze(0), size=(height, width), mode="bicubic")[0]
40
+ if mask_b.shape[1] != height or mask_b.shape[2] != width:
41
+ mask_b = torch.nn.functional.interpolate(mask_b.unsqueeze(0), size=(height, width), mode="bicubic")[0]
42
+ return (mask_a, mask_b)
43
+
44
+
45
+ class SwarmOverMergeMasksForOverlapFix:
46
+ @classmethod
47
+ def INPUT_TYPES(s):
48
+ return {
49
+ "required": {
50
+ "mask_a": ("MASK",),
51
+ "mask_b": ("MASK",),
52
+ }
53
+ }
54
+
55
+ CATEGORY = "SwarmUI/masks"
56
+ RETURN_TYPES = ("MASK",)
57
+ FUNCTION = "mask_overmerge"
58
+ DESCRIPTION = "Merges two masks by simply adding them together, without any overlap handling. Intended for use with the Overlap nodes."
59
+
60
+ def mask_overmerge(self, mask_a, mask_b):
61
+ mask_a, mask_b = mask_size_match(mask_a, mask_b)
62
+ mask_sum = mask_a + mask_b
63
+ return (mask_sum,)
64
+
65
+
66
+ class SwarmCleanOverlapMasks:
67
+ @classmethod
68
+ def INPUT_TYPES(s):
69
+ return {
70
+ "required": {
71
+ "mask_a": ("MASK",),
72
+ "mask_b": ("MASK",),
73
+ }
74
+ }
75
+
76
+ CATEGORY = "SwarmUI/masks"
77
+ RETURN_TYPES = ("MASK","MASK",)
78
+ FUNCTION = "mask_overlap"
79
+ DESCRIPTION = "Normalizes the overlap between two masks, such that where they overlap each mask will receive only partial strength that sums to no more than 1.0. This allows you to then add the masks together and the result will not exceed 1 at any point."
80
+
81
+ def mask_overlap(self, mask_a, mask_b):
82
+ mask_a, mask_b = mask_size_match(mask_a, mask_b)
83
+ mask_sum = mask_a + mask_b
84
+ mask_sum = mask_sum.clamp(1.0, 9999.0)
85
+ mask_a = mask_a / mask_sum
86
+ mask_b = mask_b / mask_sum
87
+ return (mask_a, mask_b)
88
+
89
+
90
+ class SwarmCleanOverlapMasksExceptSelf:
91
+ @classmethod
92
+ def INPUT_TYPES(s):
93
+ return {
94
+ "required": {
95
+ "mask_self": ("MASK",),
96
+ "mask_merged": ("MASK",),
97
+ }
98
+ }
99
+
100
+ CATEGORY = "SwarmUI/masks"
101
+ RETURN_TYPES = ("MASK",)
102
+ FUNCTION = "mask_clean"
103
+ DESCRIPTION = "If masks have been overmerged, this takes a single mask and grabs just the CleanOverlap result for the one mask relative to the overmerge result."
104
+
105
+ def mask_clean(self, mask_self, mask_merged):
106
+ mask_self, mask_merged = mask_size_match(mask_self, mask_merged)
107
+ mask_sum = mask_merged.clamp(1.0, 9999.0)
108
+ mask_self = mask_self / mask_sum
109
+ return (mask_self,)
110
+
111
+
112
+ class SwarmExcludeFromMask:
113
+ @classmethod
114
+ def INPUT_TYPES(s):
115
+ return {
116
+ "required": {
117
+ "main_mask": ("MASK",),
118
+ "exclude_mask": ("MASK",),
119
+ }
120
+ }
121
+
122
+ CATEGORY = "SwarmUI/masks"
123
+ RETURN_TYPES = ("MASK",)
124
+ FUNCTION = "mask_exclude"
125
+ DESCRIPTION = "Excludes the area of the exclude mask from the main mask, such that the main mask will be black in the area of the exclude mask. This is a simple subtract and clamp."
126
+
127
+ def mask_exclude(self, main_mask, exclude_mask):
128
+ main_mask, exclude_mask = mask_size_match(main_mask, exclude_mask)
129
+ main_mask = main_mask - exclude_mask
130
+ main_mask = main_mask.clamp(0.0, 1.0)
131
+ return (main_mask,)
132
+
133
+
134
+ class SwarmMaskBounds:
135
+ @classmethod
136
+ def INPUT_TYPES(s):
137
+ return {
138
+ "required": {
139
+ "mask": ("MASK",),
140
+ "grow": ("INT", {"default": 0, "min": 0, "max": 1024, "tooltip": "Number of pixels to grow the mask by."}),
141
+ },
142
+ "optional": {
143
+ "aspect_x": ("INT", {"default": 0, "min": 0, "max": 4096, "tooltip": "An X width value, used to indicate a target aspect ratio. 0 to allow any aspect."}),
144
+ "aspect_y": ("INT", {"default": 0, "min": 0, "max": 4096, "tooltip": "A Y height value, used to indicate a target aspect ratio. 0 to allow any aspect."}),
145
+ }
146
+ }
147
+
148
+ CATEGORY = "SwarmUI/masks"
149
+ RETURN_TYPES = ("INT", "INT", "INT", "INT")
150
+ RETURN_NAMES = ("x", "y", "width", "height")
151
+ FUNCTION = "get_bounds"
152
+ DESCRIPTION = "Returns the bounding box of the mask (as pixel coordinates x,y,width,height), optionally grown by the number of pixels specified in 'grow'."
153
+
154
+ def get_bounds(self, mask, grow, aspect_x=0, aspect_y=0):
155
+ if len(mask.shape) == 3:
156
+ mask = mask[0]
157
+ sum_x = (torch.sum(mask, dim=0) != 0).to(dtype=torch.int)
158
+ sum_y = (torch.sum(mask, dim=1) != 0).to(dtype=torch.int)
159
+ def getval(arr, direction):
160
+ val = torch.argmax(arr).item()
161
+ val += grow * direction
162
+ val = max(0, min(val, arr.shape[0] - 1))
163
+ return val
164
+ x_start = getval(sum_x, -1)
165
+ x_end = mask.shape[1] - getval(sum_x.flip(0), -1)
166
+ y_start = getval(sum_y, -1)
167
+ y_end = mask.shape[0] - getval(sum_y.flip(0), -1)
168
+ if aspect_x > 0 and aspect_y > 0:
169
+ actual_aspect = aspect_x / aspect_y
170
+ width = x_end - x_start
171
+ height = y_end - y_start
172
+ found_aspect = width / height
173
+ if found_aspect > actual_aspect:
174
+ desired_height = width / actual_aspect
175
+ y_start = max(0, y_start - (desired_height - height) / 2)
176
+ y_end = min(mask.shape[0], y_start + desired_height)
177
+ else:
178
+ desired_width = height * actual_aspect
179
+ x_start = max(0, x_start - (desired_width - width) / 2)
180
+ x_end = min(mask.shape[1], x_start + desired_width)
181
+ return (int(x_start), int(y_start), int(x_end - x_start), int(y_end - y_start))
182
+
183
+
184
+ class SwarmMaskGrow:
185
+ @classmethod
186
+ def INPUT_TYPES(s):
187
+ return {
188
+ "required": {
189
+ "mask": ("MASK",),
190
+ "grow": ("INT", {"default": 0, "min": 0, "max": 1024, "tooltip": "Number of pixels to grow the mask by."}),
191
+ }
192
+ }
193
+
194
+ CATEGORY = "SwarmUI/masks"
195
+ RETURN_TYPES = ("MASK",)
196
+ FUNCTION = "grow"
197
+ DESCRIPTION = "Expands the contents of the mask, such that masked (white) areas grow and cover the unmasked (black) areas by the number of pixels specified in 'grow'."
198
+
199
+ def grow(self, mask, grow):
200
+ while mask.ndim < 4:
201
+ mask = mask.unsqueeze(0)
202
+ mask = mask.to(device=main_device)
203
+ # iterate rather than all at once - this avoids padding and runs much faster for large sizes
204
+ for _ in range((grow + 1) // 2):
205
+ mask = torch.nn.functional.max_pool2d(mask, kernel_size=3, stride=1, padding=1)
206
+ return (mask.to(device=intermediate_device),)
207
+
208
+
209
+ # Blur code is copied out of ComfyUI's default ImageBlur
210
+ def gaussian_kernel(kernel_size: int, sigma: float, device=None):
211
+ x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size, device=device), torch.linspace(-1, 1, kernel_size, device=device), indexing="ij")
212
+ d = torch.sqrt(x * x + y * y)
213
+ g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
214
+ return g / g.sum()
215
+
216
+
217
+ class SwarmMaskBlur:
218
+ def __init__(self):
219
+ pass
220
+
221
+ @classmethod
222
+ def INPUT_TYPES(s):
223
+ return {
224
+ "required": {
225
+ "mask": ("MASK",),
226
+ "blur_radius": ("INT", { "default": 1, "min": 1, "max": 64, "step": 1, "tooltip": "The radius of the blur kernel." }),
227
+ "sigma": ("FLOAT", { "default": 1.0, "min": 0.1, "max": 10.0, "step": 0.1, "tooltip": "The standard deviation of the Gaussian blur kernel." }),
228
+ },
229
+ }
230
+
231
+ RETURN_TYPES = ("MASK",)
232
+ FUNCTION = "blur"
233
+ CATEGORY = "SwarmUI/masks"
234
+ DESCRIPTION = "Blurs the contents of the mask."
235
+
236
+ def blur(self, mask, blur_radius, sigma):
237
+ if blur_radius == 0:
238
+ return (mask,)
239
+ mask = mask.to(device=main_device)
240
+ kernel_size = blur_radius * 2 + 1
241
+ kernel = gaussian_kernel(kernel_size, sigma, device=mask.device).repeat(1, 1, 1).unsqueeze(1)
242
+ while mask.ndim < 4:
243
+ mask = mask.unsqueeze(0)
244
+ padded_mask = torch.nn.functional.pad(mask, (blur_radius,blur_radius,blur_radius,blur_radius), 'reflect')
245
+ blurred = torch.nn.functional.conv2d(padded_mask, kernel, padding=kernel_size // 2, groups=1)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius]
246
+ blurred = blurred.squeeze(0).squeeze(0)
247
+ mask = mask.to(device=intermediate_device)
248
+ return (blurred.to(device=intermediate_device),)
249
+
250
+
251
+ class SwarmMaskThreshold:
252
+ def __init__(self):
253
+ pass
254
+
255
+ @classmethod
256
+ def INPUT_TYPES(s):
257
+ return {
258
+ "required": {
259
+ "mask": ("MASK",),
260
+ "min": ("FLOAT", { "default": 0.2, "min": 0, "max": 1, "step": 0.01, "tooltip": "The minimum value to threshold the mask to." }),
261
+ "max": ("FLOAT", { "default": 0.8, "min": 0, "max": 1, "step": 0.01, "tooltip": "The maximum value to threshold the mask to." }),
262
+ },
263
+ }
264
+
265
+ RETURN_TYPES = ("MASK",)
266
+ FUNCTION = "threshold"
267
+ CATEGORY = "SwarmUI/masks"
268
+ DESCRIPTION = "Thresholds the mask to the specified range, clamping any lower or higher values and rescaling the range to 0-1."
269
+
270
+ def threshold(self, mask, min, max):
271
+ mask = mask.clamp(min, max)
272
+ mask = mask - min
273
+ mask = mask / (max - min)
274
+ return (mask,)
275
+
276
+
277
+ NODE_CLASS_MAPPINGS = {
278
+ "SwarmSquareMaskFromPercent": SwarmSquareMaskFromPercent,
279
+ "SwarmCleanOverlapMasks": SwarmCleanOverlapMasks,
280
+ "SwarmCleanOverlapMasksExceptSelf": SwarmCleanOverlapMasksExceptSelf,
281
+ "SwarmExcludeFromMask": SwarmExcludeFromMask,
282
+ "SwarmOverMergeMasksForOverlapFix": SwarmOverMergeMasksForOverlapFix,
283
+ "SwarmMaskBounds": SwarmMaskBounds,
284
+ "SwarmMaskGrow": SwarmMaskGrow,
285
+ "SwarmMaskBlur": SwarmMaskBlur,
286
+ "SwarmMaskThreshold": SwarmMaskThreshold,
287
+ }
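A small worked example of the normalization SwarmCleanOverlapMasks applies where two masks overlap. Plain tensors stand in for real masks here, and the mask_size_match resizing step is skipped since both inputs already share a shape.

import torch

mask_a = torch.tensor([[1.0, 1.0, 0.0]])
mask_b = torch.tensor([[0.0, 1.0, 1.0]])
mask_sum = (mask_a + mask_b).clamp(1.0, 9999.0)  # [[1., 2., 1.]] - only overlapping pixels exceed 1
norm_a = mask_a / mask_sum                       # [[1.0, 0.5, 0.0]]
norm_b = mask_b / mask_sum                       # [[0.0, 0.5, 1.0]]
# norm_a + norm_b never exceeds 1, so the two masks can safely be added back together afterwards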
SwarmMath.py ADDED
@@ -0,0 +1,23 @@
1
+
2
+ class SwarmIntAdd:
3
+ @classmethod
4
+ def INPUT_TYPES(s):
5
+ return {
6
+ "required": {
7
+ "a": ("INT", {"default": 0, "min": -2147483647, "max": 2147483647}),
8
+ "b": ("INT", {"default": 0, "min": -2147483647, "max": 2147483647})
9
+ }
10
+ }
11
+
12
+ CATEGORY = "SwarmUI/math"
13
+ RETURN_TYPES = ("INT",)
14
+ FUNCTION = "add"
15
+ DESCRIPTION = "Adds two integers. Use a negative number to subtract."
16
+
17
+ def add(self, a, b):
18
+ return (a + b,)
19
+
20
+
21
+ NODE_CLASS_MAPPINGS = {
22
+ "SwarmIntAdd": SwarmIntAdd
23
+ }
SwarmReference.py ADDED
@@ -0,0 +1,56 @@
1
+ import torch
2
+
3
+ # This code copied from https://github.com/comfyanonymous/ComfyUI_experiments/blob/master/reference_only.py
4
+ # And modified to work better in Swarm generated workflows
5
+
6
+ class SwarmReferenceOnly:
7
+ @classmethod
8
+ def INPUT_TYPES(s):
9
+ return {
10
+ "required": {
11
+ "model": ("MODEL",),
12
+ "reference": ("LATENT",),
13
+ "latent": ("LATENT",)
14
+ }
15
+ }
16
+
17
+ CATEGORY = "SwarmUI/sampling"
18
+ RETURN_TYPES = ("MODEL", "LATENT")
19
+ FUNCTION = "reference_only"
20
+ DESCRIPTION = "Applies 'reference only' image-prompting to the generation. Must forward the new model, and the new latent, to work properly."
21
+
22
+ def reference_only(self, model, reference, latent):
23
+ model_reference = model.clone()
24
+ reference["samples"] = torch.nn.functional.interpolate(reference["samples"], size=(latent["samples"].shape[2], latent["samples"].shape[3]), mode="bilinear")
25
+
26
+ batch = latent["samples"].shape[0] + reference["samples"].shape[0]
27
+ def reference_apply(q, k, v, extra_options):
28
+ k = k.clone().repeat(1, 2, 1)
29
+ offset = 0
30
+ if q.shape[0] > batch:
31
+ offset = batch
32
+
33
+ for o in range(0, q.shape[0], batch):
34
+ for x in range(1, batch):
35
+ k[x + o, q.shape[1]:] = q[o,:]
36
+
37
+ return q, k, k
38
+
39
+ model_reference.set_model_attn1_patch(reference_apply)
40
+ out_latent = torch.cat((reference["samples"], latent["samples"]))
41
+ if "noise_mask" in latent:
42
+ mask = latent["noise_mask"]
43
+ else:
44
+ mask = torch.ones((64,64), dtype=torch.float32, device="cpu")
45
+
46
+ if len(mask.shape) < 3:
47
+ mask = mask.unsqueeze(0)
48
+ if mask.shape[0] < latent["samples"].shape[0]:
49
+ mask = mask.repeat(latent["samples"].shape[0], 1, 1)
50
+
51
+ out_mask = torch.zeros((1,mask.shape[1],mask.shape[2]), dtype=torch.float32, device="cpu")
52
+ return (model_reference, {"samples": out_latent, "noise_mask": torch.cat((out_mask, mask))})
53
+
54
+ NODE_CLASS_MAPPINGS = {
55
+ "SwarmReferenceOnly": SwarmReferenceOnly,
56
+ }
SwarmSaveImageWS.py ADDED
@@ -0,0 +1,152 @@
1
+ from PIL import Image
2
+ import numpy as np
3
+ import comfy.utils
4
+ from server import PromptServer, BinaryEventTypes
5
+ import time, io, struct
6
+
7
+ SPECIAL_ID = 12345 # Tells swarm that the node is going to output final images
8
+ VIDEO_ID = 12346
9
+ TEXT_ID = 12347
10
+
11
+ def send_image_to_server_raw(type_num: int, save_me: callable, id: int, event_type: int = BinaryEventTypes.PREVIEW_IMAGE):
12
+ out = io.BytesIO()
13
+ header = struct.pack(">I", type_num)
14
+ out.write(header)
15
+ save_me(out)
16
+ out.seek(0)
17
+ preview_bytes = out.getvalue()
18
+ server = PromptServer.instance
19
+ server.send_sync("progress", {"value": id, "max": id}, sid=server.client_id)
20
+ server.send_sync(event_type, preview_bytes, sid=server.client_id)
21
+
22
+ class SwarmSaveImageWS:
23
+ @classmethod
24
+ def INPUT_TYPES(s):
25
+ return {
26
+ "required": {
27
+ "images": ("IMAGE", ),
28
+ },
29
+ "optional": {
30
+ "bit_depth": (["8bit", "16bit", "raw"], {"default": "8bit"})
31
+ }
32
+ }
33
+
34
+ CATEGORY = "SwarmUI/images"
35
+ RETURN_TYPES = ()
36
+ FUNCTION = "save_images"
37
+ OUTPUT_NODE = True
38
+ DESCRIPTION = "Acts like a special version of 'SaveImage' that doesn't actually save to disk; instead it sends the image directly over websocket. This is intended so that SwarmUI can save the image itself rather than having Comfy's Core save it."
39
+
40
+ def save_images(self, images, bit_depth = "8bit"):
41
+ pbar = comfy.utils.ProgressBar(SPECIAL_ID)
42
+ step = 0
43
+ for image in images:
44
+ if bit_depth == "raw":
45
+ i = 255.0 * image.cpu().numpy()
46
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
47
+ def do_save(out):
48
+ img.save(out, format='BMP')
49
+ send_image_to_server_raw(1, do_save, SPECIAL_ID, event_type=10)
50
+ elif bit_depth == "16bit":
51
+ i = 65535.0 * image.cpu().numpy()
52
+ img = self.convert_img_16bit(np.clip(i, 0, 65535).astype(np.uint16))
53
+ send_image_to_server_raw(2, lambda out: out.write(img), SPECIAL_ID)
54
+ else:
55
+ i = 255.0 * image.cpu().numpy()
56
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
57
+ pbar.update_absolute(step, SPECIAL_ID, ("PNG", img, None))
58
+ step += 1
59
+
60
+ return {}
61
+
62
+ def convert_img_16bit(self, img_np):
63
+ try:
64
+ import cv2
65
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
66
+ success, img_encoded = cv2.imencode('.png', img_np)
67
+
68
+ if img_encoded is None or not success:
69
+ raise RuntimeError("OpenCV failed to encode image.")
70
+
71
+ return img_encoded.tobytes()
72
+ except Exception as e:
73
+ print(f"Error encoding 16-bit image with OpenCV: {e}")
74
+ raise
75
+
76
+ @classmethod
77
+ def IS_CHANGED(s, images, bit_depth = "8bit"):
78
+ return time.time()
79
+
80
+
81
+ class SwarmSaveAnimatedWebpWS:
82
+ methods = {"default": 4, "fastest": 0, "slowest": 6}
83
+
84
+ @classmethod
85
+ def INPUT_TYPES(s):
86
+ return {
87
+ "required": {
88
+ "images": ("IMAGE", ),
89
+ "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01, "tooltip": "Frames per second, must match the actual generated speed or else you will get slow/fast motion."}),
90
+ "lossless": ("BOOLEAN", {"default": True, "tooltip": "If true, the image will be saved losslessly, otherwise it will be saved with the quality specified. Lossless is best quality, but takes more file space."}),
91
+ "quality": ("INT", {"default": 80, "min": 0, "max": 100, "tooltip": "Quality of the image as a percentage, only used if lossless is false. Smaller values save more space but look worse. 80 is a fine general value."}),
92
+ "method": (list(s.methods.keys()),),
93
+ },
94
+ }
95
+
96
+ CATEGORY = "SwarmUI/video"
97
+ RETURN_TYPES = ()
98
+ FUNCTION = "save_images"
99
+ OUTPUT_NODE = True
100
+ DESCRIPTION = "Acts like a special version of 'SaveAnimatedWEBP' that doesn't actually save to disk; instead it sends the animation directly over websocket. This is intended so that SwarmUI can save the image itself rather than having Comfy's Core save it."
101
+
102
+ def save_images(self, images, fps, lossless, quality, method):
103
+ method = self.methods.get(method)
104
+ pil_images = []
105
+ for image in images:
106
+ i = 255. * image.cpu().numpy()
107
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
108
+ pil_images.append(img)
109
+
110
+ def do_save(out):
111
+ pil_images[0].save(out, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1 : len(pil_images)], lossless=lossless, quality=quality, method=method, format='WEBP')
112
+ send_image_to_server_raw(3, do_save, VIDEO_ID)
113
+
114
+ return { }
115
+
116
+ @classmethod
117
+ def IS_CHANGED(s, images, fps, lossless, quality, method):
118
+ return time.time()
119
+
120
+
121
+ class SwarmAddSaveMetadataWS:
122
+ @classmethod
123
+ def INPUT_TYPES(s):
124
+ return {
125
+ "required": {
126
+ "key": ("STRING", {"tooltip": "The key to add to the metadata tracker. Must be simple A-Z plain text or underscores."}),
127
+ "value": ("STRING", {"tooltip": "The value to add to the metadata tracker."}),
128
+ }
129
+ }
130
+
131
+ CATEGORY = "SwarmUI/images"
132
+ RETURN_TYPES = ()
133
+ FUNCTION = "add_save_metadata"
134
+ OUTPUT_NODE = True
135
+ DESCRIPTION = "Adds a metadata key/value pair to SwarmUI's metadata tracker for this generation, which will be appended to any images saved after this node triggers. Note that keys overwrite, not add. Any key can have only one value."
136
+
137
+ def add_save_metadata(self, key, value):
138
+ full_text = f"{key}:{value}"
139
+ full_text_bytes = full_text.encode('utf-8')
140
+ send_image_to_server_raw(0, lambda out: out.write(full_text_bytes), TEXT_ID, event_type=BinaryEventTypes.TEXT)
141
+ return {}
142
+
143
+ @classmethod
144
+ def IS_CHANGED(s, key, value):
145
+ return time.time()
146
+
147
+
148
+ NODE_CLASS_MAPPINGS = {
149
+ "SwarmSaveImageWS": SwarmSaveImageWS,
150
+ "SwarmSaveAnimatedWebpWS": SwarmSaveAnimatedWebpWS,
151
+ "SwarmAddSaveMetadataWS": SwarmAddSaveMetadataWS,
152
+ }
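A hedged sketch of how a receiving client could split the buffers that send_image_to_server_raw builds above: a 4-byte big-endian type number (1 = BMP, 2 = 16-bit PNG, 3 = WEBP animation, 0 = metadata text) followed by the payload bytes. The parse_swarm_payload name is made up for illustration, and the extra event framing ComfyUI's server adds on the wire is outside this file.

import struct

def parse_swarm_payload(buffer: bytes):
    # buffer is the value handed to server.send_sync, before any websocket-level framing
    type_num = struct.unpack(">I", buffer[:4])[0]
    payload = buffer[4:]
    return type_num, payload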
SwarmTextHandling.py ADDED
@@ -0,0 +1,211 @@
1
+ import torch, comfy
2
+ from nodes import MAX_RESOLUTION
3
+
4
+
5
+ # LLaMA template for Hunyuan Image2Video.
6
+ # This is actually a single-line monstrosity due to the way it's formatted.
7
+ # This is probably an accident from the original reference code's authors misunderstanding how adjacent string literals concatenate,
8
+ # but, well, we're just matching what they did and that's what they did.
9
+ PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = (
10
+ "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: "
11
+ "1. The main content and theme of the video."
12
+ "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
13
+ "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
14
+ "4. background environment, light, style and atmosphere."
15
+ "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
16
+ "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
17
+ "<|start_header_id|>assistant<|end_header_id|>\n\n"
18
+ )
19
+ # Prompt template for Qwen Image Edit Plus.
20
+ PROMPT_TEMPLATE_QWEN_IMAGE_EDIT_PLUS = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
21
+
22
+ class SwarmClipTextEncodeAdvanced:
23
+ @classmethod
24
+ def INPUT_TYPES(s):
25
+ return {
26
+ "required": {
27
+ "clip": ("CLIP", ),
28
+ "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "How many sampling steps will be ran - this is needed for per-step features (from-to/alternate/...) to work properly."}),
29
+ "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True, "tooltip": "Your actual prompt text."} ),
30
+ "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION, "tooltip": "Intended width of the image, used by some models (eg SDXL)."}),
31
+ "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION, "tooltip": "Intended height of the image, used by some models (eg SDXL)."}),
32
+ "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION, "tooltip": "Actual width of the image, used by some models (eg SDXL)."}),
33
+ "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION, "tooltip": "Actual height of the image, used by some models (eg SDXL)."}),
34
+ },
35
+ "optional": {
36
+ "guidance": ("FLOAT", {"default": -1, "min": -1, "max": 100.0, "step": 0.1, "tooltip": "Guidance value to embed, used by some models (eg Flux)."}),
37
+ "llama_template": ("STRING", {"default": "", "multiline": True, "tooltip": "Template for the LLaMA model, if applicable."}),
38
+ "clip_vision_output": ("CLIP_VISION_OUTPUT", {"default": None, "tooltip": "Optional CLIP Vision Output to use for the LLaMA model, if applicable."}),
39
+ "images": ("IMAGE", {"default": None, "tooltip": "Optional images to use for a text-vision model, if applicable."}),
40
+ }
41
+ }
42
+
43
+ CATEGORY = "SwarmUI/clip"
44
+ RETURN_TYPES = ("CONDITIONING",)
45
+ FUNCTION = "encode"
46
+ DESCRIPTION = "Acts like the regular CLIPTextEncode, but supports more advanced special features like '<break>', '[from:to:when]', '[alter|nate]', ..."
47
+
48
+ def encode(self, clip, steps: int, prompt: str, width: int, height: int, target_width: int, target_height: int, guidance: float = -1, llama_template = None, clip_vision_output = None, images = None):
49
+ image_prompt = ""
50
+ if llama_template == "hunyuan_image":
51
+ llama_template = PROMPT_TEMPLATE_ENCODE_VIDEO_I2V
52
+ elif llama_template == "qwen_image_edit_plus":
53
+ llama_template = PROMPT_TEMPLATE_QWEN_IMAGE_EDIT_PLUS
54
+ if images is not None:
55
+ if len(images.shape) == 3:
56
+ images = [images]
57
+ else:
58
+ images = [i.unsqueeze(0) for i in images]
59
+ for i, image in enumerate(images):
60
+ image_prompt += f"Picture {i + 1}: <|vision_start|><|image_pad|><|vision_end|>"
61
+
62
+ def tokenize(text: str):
63
+ if clip_vision_output is not None:
64
+ return clip.tokenize(text, llama_template=llama_template, image_embeds=clip_vision_output.mm_projected)
65
+ elif images is not None:
66
+ return clip.tokenize(image_prompt + text, llama_template=llama_template, images=images)
67
+ else:
68
+ return clip.tokenize(text)
69
+
70
+ encoding_cache = {}
71
+
72
+ def text_to_cond(text: str, start_percent: float, end_percent: float):
73
+ text = text.replace("\0\1", "[").replace("\0\2", "]").replace("\0\3", "embedding:")
74
+ if text in encoding_cache:
75
+ cond_arr = encoding_cache[text]
76
+ else:
77
+ cond_chunks = text.split("<break>")
78
+ tokens = tokenize(cond_chunks[0])
79
+ cond_arr = clip.encode_from_tokens_scheduled(tokens)
80
+ if len(cond_chunks) > 1:
81
+ for chunk in cond_chunks[1:]:
82
+ tokens = tokenize(chunk)
83
+ cond_arr_chunk = clip.encode_from_tokens_scheduled(tokens)
84
+ catted_cond = torch.cat([cond_arr[0][0], cond_arr_chunk[0][0]], dim=1)
85
+ cond_arr[0] = [catted_cond, cond_arr[0][1]]
86
+ encoding_cache[text] = cond_arr
87
+ result = {"pooled_output": cond_arr[0][1]["pooled_output"], "width": width, "height": height, "crop_w": 0, "crop_h": 0, "target_width": target_width, "target_height": target_height, "start_percent": start_percent, "end_percent": end_percent}
88
+ if guidance >= 0:
89
+ result["guidance"] = guidance
90
+ out_cond_arr = [[cond_arr[0][0], result]]
91
+ out_cond_arr.extend(cond_arr[1:])
92
+ return out_cond_arr
93
+
94
+ prompt = prompt.replace("\\[", "\0\1").replace("\\]", "\0\2").replace("embedding:", "\0\3")
95
+
96
+ chunks = []
97
+ any = [False]
98
+ escapable = ["\\", "[", "]", ":", "|", "(", ")", "<", ">"]
99
+
100
+ def append_chunk(text: str, applies_to: list[int], can_subprocess: bool, limit_to: list[int]):
101
+ applies_to = [i for i in applies_to if i in limit_to]
102
+ fixed_text = ""
103
+ do_skip = False
104
+ for i in range(len(text)):
105
+ if text[i] == "\\" and not do_skip and i + 1 < len(text) and text[i + 1] in escapable:
106
+ do_skip = True
107
+ else:
108
+ do_skip = False
109
+ fixed_text += text[i]
110
+ if can_subprocess and '[' in fixed_text:
111
+ get_chunks(fixed_text, applies_to)
112
+ else:
113
+ chunks.append({'text': text, 'applies_to': applies_to})
114
+
115
+ def get_chunks(remaining: str, limit_to: list[int] = [i for i in range(steps)]):
116
+ while True:
117
+ start = remaining.find("[")
118
+ if start == -1:
119
+ append_chunk(remaining, [i for i in range(steps)], False, limit_to)
120
+ break
121
+
122
+ end = -1
123
+ count = 0
124
+ do_skip = False
125
+ colon_indices = []
126
+ pipe_indices = []
127
+ for i in range(start + 1, len(remaining)):
128
+ char = remaining[i]
129
+ if char == "\\" and not do_skip and i + 1 < len(remaining) and remaining[i + 1] in escapable:
130
+ do_skip = True
131
+ elif do_skip:
132
+ do_skip = False
133
+ elif char == "[":
134
+ count += 1
135
+ elif char == "]":
136
+ if count == 0:
137
+ end = i
138
+ break
139
+ count -= 1
140
+ elif char == ":" and count == 0 and len(pipe_indices) == 0:
141
+ colon_indices.append(i)
142
+ elif char == "|" and count == 0 and len(colon_indices) == 0:
143
+ pipe_indices.append(i)
144
+
145
+ if end == -1:
146
+ chunks[-1]['text'] += remaining
147
+ break
148
+ append_chunk(remaining[:start], [i for i in range(steps)], False, limit_to)
149
+ control = remaining[start + 1:end]
150
+
151
+ if len(pipe_indices) > 0:
152
+ data = split_text_on(control, pipe_indices, start + 1)
153
+ for i in range(len(data)):
154
+ append_chunk(data[i], [step for step in range(steps) if step % len(data) == i], True, limit_to)
155
+ any[0] = True
156
+ elif len(colon_indices) == 2:
157
+ coloned = split_text_on(control, colon_indices, start + 1)
158
+ when = float(coloned[2])
159
+ if when < 1:
160
+ when = when * steps
161
+ append_chunk(coloned[0], [i for i in range(steps) if i < when], True, limit_to)
162
+ append_chunk(coloned[1], [i for i in range(steps) if i >= when], True, limit_to)
163
+ any[0] = True
164
+ elif len(colon_indices) == 1:
165
+ coloned = split_text_on(control, colon_indices, start + 1)
166
+ when = float(coloned[1])
167
+ if when < 1:
168
+ when = when * steps
169
+ append_chunk(coloned[0], [i for i in range(steps) if i >= when], True, limit_to)
170
+ any[0] = True
171
+ else:
172
+ append_chunk(control, [i for i in range(steps)], False, limit_to)
173
+
174
+ remaining = remaining[end + 1:]
175
+
176
+ get_chunks(prompt)
177
+
178
+ if not any[0]:
179
+ return (text_to_cond(prompt, 0, 1), )
180
+
181
+ conds_out = []
182
+ last_text = ""
183
+ start_perc = 0
184
+ for i in range(steps):
185
+ perc = i / steps
186
+ text = ""
187
+ for chunk in chunks:
188
+ if i in chunk['applies_to']:
189
+ text += chunk['text']
190
+ if text != last_text or i == 0:
191
+ if i != 0:
192
+ conds_out.extend(text_to_cond(last_text, start_perc - 0.001, perc + 0.001))
193
+ last_text = text
194
+ start_perc = perc
195
+ conds_out.extend(text_to_cond(last_text, start_perc - 0.001, 1))
196
+ return (conds_out, )
197
+
198
+
199
+ def split_text_on(text: str, indices: list[int], offset: int) -> list[str]:
200
+ indices = [i - offset for i in indices]
201
+ result = []
202
+ result.append(text[:indices[0]])
203
+ for i in range(len(indices) - 1):
204
+ result.append(text[indices[i] + 1:indices[i + 1]])
205
+ result.append(text[indices[-1] + 1:])
206
+ return result
207
+
208
+
209
+ NODE_CLASS_MAPPINGS = {
210
+ "SwarmClipTextEncodeAdvanced": SwarmClipTextEncodeAdvanced,
211
+ }
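Illustrative prompt strings for the per-step syntax the encoder above schedules, assuming a 20-step run:

# "a [photo:painting:0.5] of a street"  ->  "photo" for steps 0-9, then "painting" for steps 10-19
# "a [red|blue] car"                    ->  alternates "red" and "blue" on every step
# "a castle [at night:10]"              ->  "at night" is only included from step 10 onward
# "a cat <break> highly detailed fur"   ->  encodes the two chunks separately and concatenates the conditioning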
SwarmTiling.py ADDED
@@ -0,0 +1,86 @@
1
+ import torch, copy
2
+ from torch.nn import functional as F
3
+
4
+ def make_circular_assym(m, assym_mode):
5
+ def _conv_forward(self, input, weight, bias):
6
+ if self.padding_mode == "x_circular":
7
+ padded = F.pad(input, (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0), "circular")
8
+ padded = F.pad(padded, (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3]), "constant", 0)
9
+ return F.conv2d(padded, weight, bias, self.stride, (0, 0), self.dilation, self.groups)
10
+ elif self.padding_mode == "y_circular":
11
+ padded = F.pad(input, (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0), "constant", 0)
12
+ padded = F.pad(padded, (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3]), "circular")
13
+ return F.conv2d(padded, weight, bias, self.stride, (0, 0), self.dilation, self.groups)
14
+ elif self.padding_mode != "zeros":
15
+ padded = F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode)
16
+ return F.conv2d(padded, weight, bias, self.stride, (0, 0), self.dilation, self.groups)
17
+ else:
18
+ return F.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
19
+ if isinstance(m, torch.nn.Conv2d):
20
+ m._conv_forward = _conv_forward.__get__(m, torch.nn.Conv2d)
21
+ m.padding_mode = assym_mode
22
+
23
+ def make_circular(m):
24
+ if isinstance(m, torch.nn.Conv2d):
25
+ m.padding_mode = "circular"
26
+
27
+ class SwarmModelTiling:
28
+ @classmethod
29
+ def INPUT_TYPES(s):
30
+ return {
31
+ "required": {
32
+ "model": ("MODEL", ),
33
+ },
34
+ "optional": {
35
+ "tile_axis": (["Both", "X", "Y"], )
36
+ }
37
+ }
38
+
39
+ CATEGORY = "SwarmUI/sampling"
40
+ RETURN_TYPES = ("MODEL",)
41
+ FUNCTION = "adapt"
42
+ DESCRIPTION = "Adapts a model to use circular padding to enable tiled image results. Only works on UNet based models (eg SDv1, SDXL), not on DiT models (eg SD3, Flux). Use with SwarmTileableVAE."
43
+
44
+ def adapt(self, model, tile_axis=None):
45
+ m = copy.deepcopy(model)
46
+ if tile_axis is not None and tile_axis != "Both":
47
+ if tile_axis == "X":
48
+ m.model.apply(lambda x: make_circular_assym(x, "x_circular"))
49
+ elif tile_axis == "Y":
50
+ m.model.apply(lambda x: make_circular_assym(x, "y_circular"))
51
+ else:
52
+ m.model.apply(make_circular)
53
+ return (m,)
54
+
55
+ class SwarmTileableVAE:
56
+ @classmethod
57
+ def INPUT_TYPES(s):
58
+ return {
59
+ "required": {
60
+ "vae": ("VAE", )
61
+ },
62
+ "optional": {
63
+ "tile_axis": (["Both", "X", "Y"], )
64
+ }
65
+ }
66
+
67
+ CATEGORY = "SwarmUI/sampling"
68
+ RETURN_TYPES = ("VAE",)
69
+ FUNCTION = "adapt"
70
+ DESCRIPTION = "Adapts a VAE to use circular padding to enable tiled image results. Use with SwarmModelTiling."
71
+
72
+ def adapt(self, vae, tile_axis=None):
73
+ vae = copy.deepcopy(vae)
74
+ if tile_axis is not None and tile_axis != "Both":
75
+ if tile_axis == "X":
76
+ vae.first_stage_model.apply(lambda x: make_circular_assym(x, "x_circular"))
77
+ elif tile_axis == "Y":
78
+ vae.first_stage_model.apply(lambda x: make_circular_assym(x, "y_circular"))
79
+ else:
80
+ vae.first_stage_model.apply(make_circular)
81
+ return (vae,)
82
+
83
+ NODE_CLASS_MAPPINGS = {
84
+ "SwarmModelTiling": SwarmModelTiling,
85
+ "SwarmTileableVAE": SwarmTileableVAE,
86
+ }
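A minimal demonstration, independent of any diffusion model, of why switching Conv2d padding to circular makes outputs tileable: samples past one edge wrap around to the opposite edge instead of being zero-padded.

import torch

conv = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, padding_mode="circular", bias=False)
x = torch.arange(16.0).reshape(1, 1, 4, 4)
y = conv(x)  # column 0 also "sees" column 3 (and row 0 sees row 3), so opposite edges stay consistent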
SwarmUnsampler.py ADDED
@@ -0,0 +1,50 @@
1
+ import torch, comfy
2
+ from .SwarmKSampler import make_swarm_sampler_callback
3
+
4
+ class SwarmUnsampler:
5
+ @classmethod
6
+ def INPUT_TYPES(s):
7
+ return {
8
+ "required": {
9
+ "model": ("MODEL",),
10
+ "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
11
+ "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
12
+ "scheduler": (["turbo"] + comfy.samplers.KSampler.SCHEDULERS, ),
13
+ "positive": ("CONDITIONING", ),
14
+ "negative": ("CONDITIONING", ),
15
+ "latent_image": ("LATENT", ),
16
+ "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
17
+ "previews": (["default", "none", "one"], )
18
+ }
19
+ }
20
+
21
+ CATEGORY = "SwarmUI/sampling"
22
+ RETURN_TYPES = ("LATENT",)
23
+ FUNCTION = "unsample"
24
+ DESCRIPTION = "Runs sampling in reverse. The function of this is to create noise that matches an image, such that you can then run forward sampling with an altered version of the unsampling prompt to get a closely altered image. May not work on all models, may not work perfectly. Input values should largely match your Sampler inputs."
25
+
26
+ def unsample(self, model, steps, sampler_name, scheduler, positive, negative, latent_image, start_at_step, previews):
27
+ device = comfy.model_management.get_torch_device()
28
+ latent_samples = latent_image["samples"].to(device)
29
+
30
+ noise = torch.zeros(latent_samples.size(), dtype=latent_samples.dtype, layout=latent_samples.layout, device=device)
31
+ noise_mask = None
32
+ if "noise_mask" in latent_image:
33
+ noise_mask = latent_image["noise_mask"]
34
+
35
+ sampler = comfy.samplers.KSampler(model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
36
+ sigmas = sampler.sigmas.flip(0) + 0.0001
37
+
38
+ callback = make_swarm_sampler_callback(steps, device, model, previews)
39
+
40
+ samples = comfy.sample.sample(model, noise, steps, 1, sampler_name, scheduler, positive, negative, latent_samples,
41
+ denoise=1.0, disable_noise=False, start_step=0, last_step=steps - start_at_step,
42
+ force_full_denoise=False, noise_mask=noise_mask, sigmas=sigmas, callback=callback, seed=0)
43
+ out = latent_image.copy()
44
+ out["samples"] = samples
45
+ return (out, )
46
+
47
+
48
+ NODE_CLASS_MAPPINGS = {
49
+ "SwarmUnsampler": SwarmUnsampler,
50
+ }
__init__.py ADDED
@@ -0,0 +1,34 @@
1
+ import os, folder_paths
2
+
3
+ from . import SwarmBlending, SwarmClipSeg, SwarmImages, SwarmInternalUtil, SwarmKSampler, SwarmLoadImageB64, SwarmLoraLoader, SwarmMasks, SwarmSaveImageWS, SwarmTiling, SwarmExtractLora, SwarmUnsampler, SwarmLatents, SwarmInputNodes, SwarmTextHandling, SwarmReference, SwarmMath
4
+
5
+ WEB_DIRECTORY = "./web"
6
+
7
+ NODE_CLASS_MAPPINGS = (
8
+ SwarmBlending.NODE_CLASS_MAPPINGS
9
+ | SwarmClipSeg.NODE_CLASS_MAPPINGS
10
+ | SwarmImages.NODE_CLASS_MAPPINGS
11
+ | SwarmInternalUtil.NODE_CLASS_MAPPINGS
12
+ | SwarmKSampler.NODE_CLASS_MAPPINGS
13
+ | SwarmLoadImageB64.NODE_CLASS_MAPPINGS
14
+ | SwarmLoraLoader.NODE_CLASS_MAPPINGS
15
+ | SwarmMasks.NODE_CLASS_MAPPINGS
16
+ | SwarmSaveImageWS.NODE_CLASS_MAPPINGS
17
+ | SwarmTiling.NODE_CLASS_MAPPINGS
18
+ | SwarmExtractLora.NODE_CLASS_MAPPINGS
19
+ | SwarmUnsampler.NODE_CLASS_MAPPINGS
20
+ | SwarmLatents.NODE_CLASS_MAPPINGS
21
+ | SwarmInputNodes.NODE_CLASS_MAPPINGS
22
+ | SwarmTextHandling.NODE_CLASS_MAPPINGS
23
+ | SwarmReference.NODE_CLASS_MAPPINGS
24
+ | SwarmMath.NODE_CLASS_MAPPINGS
25
+ )
26
+
27
+ # TODO: Why is there no comfy core register method? 0.o
28
+ def register_model_folder(name):
29
+ if name not in folder_paths.folder_names_and_paths:
30
+ folder_paths.folder_names_and_paths[name] = ([os.path.join(folder_paths.models_dir, name)], folder_paths.supported_pt_extensions)
31
+ else:
32
+ folder_paths.folder_names_and_paths[name] = (folder_paths.folder_names_and_paths[name][0], folder_paths.supported_pt_extensions)
33
+
34
+ register_model_folder("yolov8")
__pycache__/SwarmBlending.cpython-310.pyc ADDED (Binary file, 1.56 kB)
__pycache__/SwarmBlending.cpython-313.pyc ADDED (Binary file, 2.33 kB)
__pycache__/SwarmClipSeg.cpython-310.pyc ADDED (Binary file, 3.18 kB)
__pycache__/SwarmClipSeg.cpython-313.pyc ADDED (Binary file, 5.04 kB)
__pycache__/SwarmExtractLora.cpython-310.pyc ADDED (Binary file, 5.14 kB)
__pycache__/SwarmExtractLora.cpython-313.pyc ADDED (Binary file, 9.08 kB)
__pycache__/SwarmImages.cpython-310.pyc ADDED (Binary file, 12.2 kB)
__pycache__/SwarmImages.cpython-313.pyc ADDED (Binary file, 21.3 kB)
__pycache__/SwarmInputNodes.cpython-310.pyc ADDED (Binary file, 13.7 kB)
__pycache__/SwarmInputNodes.cpython-313.pyc ADDED (Binary file, 18.3 kB)
__pycache__/SwarmInternalUtil.cpython-310.pyc ADDED (Binary file, 3.72 kB)
__pycache__/SwarmInternalUtil.cpython-313.pyc ADDED (Binary file, 5.12 kB)
__pycache__/SwarmKSampler.cpython-310.pyc ADDED (Binary file, 12.6 kB)
__pycache__/SwarmKSampler.cpython-313.pyc ADDED (Binary file, 21.4 kB)
__pycache__/SwarmLatents.cpython-310.pyc ADDED (Binary file, 1.79 kB)
__pycache__/SwarmLatents.cpython-313.pyc ADDED (Binary file, 2.24 kB)
__pycache__/SwarmLoadImageB64.cpython-310.pyc ADDED (Binary file, 1.95 kB)
__pycache__/SwarmLoadImageB64.cpython-313.pyc ADDED (Binary file, 3.29 kB)
__pycache__/SwarmLoraLoader.cpython-310.pyc ADDED (Binary file, 2.06 kB)
__pycache__/SwarmLoraLoader.cpython-313.pyc ADDED (Binary file, 2.77 kB)
__pycache__/SwarmMasks.cpython-310.pyc ADDED (Binary file, 10.5 kB)
__pycache__/SwarmMasks.cpython-313.pyc ADDED (Binary file, 16.1 kB)
__pycache__/SwarmMath.cpython-310.pyc ADDED (Binary file, 926 Bytes)
__pycache__/SwarmMath.cpython-313.pyc ADDED (Binary file, 1.06 kB)
__pycache__/SwarmReference.cpython-310.pyc ADDED (Binary file, 2.02 kB)
__pycache__/SwarmReference.cpython-313.pyc ADDED (Binary file, 3.42 kB)
__pycache__/SwarmSaveImageWS.cpython-310.pyc ADDED (Binary file, 6.92 kB)
__pycache__/SwarmSaveImageWS.cpython-313.pyc ADDED (Binary file, 10.1 kB)
__pycache__/SwarmTextHandling.cpython-310.pyc ADDED (Binary file, 8.93 kB)
__pycache__/SwarmTextHandling.cpython-313.pyc ADDED (Binary file, 12.4 kB)
__pycache__/SwarmTiling.cpython-310.pyc ADDED (Binary file, 3.43 kB)
__pycache__/SwarmTiling.cpython-313.pyc ADDED (Binary file, 5.99 kB)