#This is an example that uses the WebSockets API to know when a prompt execution is done.
#Once the prompt execution is done, it downloads the images using the /history endpoint.
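#Dependencies (inferred from the imports and the display code at the bottom):
#  pip install websocket-client Pillow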
import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse
server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())
def queue_prompt(prompt, prompt_id):
    # POST the workflow to the /prompt endpoint. NOTE: this passes a
    # client-generated prompt_id; if your ComfyUI build ignores it and
    # assigns its own, use the prompt_id returned in the response instead.
    p = {"prompt": prompt, "client_id": client_id, "prompt_id": prompt_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    urllib.request.urlopen(req).read()
def get_image(filename, subfolder, folder_type):
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
        return response.read()
def get_history(prompt_id):
    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
        return json.loads(response.read())
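# The /history/<prompt_id> response maps the prompt_id to an execution record;
# this script only relies on its "outputs" field, whose rough shape (inferred
# from the accesses in get_images below, not an exhaustive schema) is:
#   {"<prompt_id>": {"outputs": {"<node_id>": {"images": [
#       {"filename": "...", "subfolder": "...", "type": "output"}]}}}}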
def get_images(ws, prompt):
    prompt_id = str(uuid.uuid4())
    queue_prompt(prompt, prompt_id)
    output_images = {}
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                if data['node'] is None and data['prompt_id'] == prompt_id:
                    break #Execution is done
        else:
            # If you want to be able to decode the binary stream for latent previews, here is how you can do it:
            # bytesIO = BytesIO(out[8:])
            # preview_image = Image.open(bytesIO) # This is your preview in PIL image format, store it in a global
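            # A fuller sketch (assumption: each binary frame starts with two
            # big-endian uint32s, an event type and an image format, followed
            # by the image bytes, hence the out[8:] slice above):
            #   import struct
            #   from io import BytesIO
            #   from PIL import Image
            #   event_type, image_format = struct.unpack(">II", out[:8])
            #   preview_image = Image.open(BytesIO(out[8:]))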
            continue #previews are binary data
    history = get_history(prompt_id)[prompt_id]
    for node_id in history['outputs']:
        node_output = history['outputs'][node_id]
        images_output = []
        if 'images' in node_output:
            for image in node_output['images']:
                image_data = get_image(image['filename'], image['subfolder'], image['type'])
                images_output.append(image_data)
        output_images[node_id] = images_output
    return output_images
prompt_text = """
{
    "3": {
        "inputs": {
            "seed": 473371463840349,
            "steps": 8,
            "cfg": 1,
            "sampler_name": "lcm",
            "scheduler": "beta",
            "denoise": 1,
            "model": [
                "12",
                0
            ],
            "positive": [
                "10",
                0
            ],
            "negative": [
                "7",
                0
            ],
            "latent_image": [
                "16",
                0
            ]
        },
        "class_type": "KSampler",
        "_meta": {
            "title": "KSampler"
        }
    },
    "4": {
        "inputs": {
            "ckpt_name": "novaFurryXL_illustriousV110.safetensors"
        },
        "class_type": "CheckpointLoaderSimple",
        "_meta": {
            "title": "Load Checkpoint"
        }
    },
    "7": {
        "inputs": {
            "text": "Xx_NEGPROMPT_xX",
            "clip": [
                "11",
                1
            ]
        },
        "class_type": "CLIPTextEncode",
        "_meta": {
            "title": "CLIP Text Encode (Prompt)"
        }
    },
    "8": {
        "inputs": {
            "samples": [
                "3",
                0
            ],
            "vae": [
                "4",
                2
            ]
        },
        "class_type": "VAEDecode",
        "_meta": {
            "title": "VAE Decode"
        }
    },
    "9": {
        "inputs": {
            "filename_prefix": "Fast",
            "images": [
                "8",
                0
            ]
        },
        "class_type": "SaveImage",
        "_meta": {
            "title": "Save Image"
        }
    },
    "10": {
        "inputs": {
            "text": "Xx_PROMPT_xX",
            "clip": [
                "11",
                1
            ]
        },
        "class_type": "CLIPTextEncodeWithBreak",
        "_meta": {
            "title": "CLIPTextEncode with BREAK syntax"
        }
    },
    "11": {
        "inputs": {
            "lora_name": "dmd2_sdxl_4step_lora_fp16.safetensors",
            "strength_model": 1,
            "strength_clip": 1,
            "model": [
                "4",
                0
            ],
            "clip": [
                "4",
                1
            ]
        },
        "class_type": "LoraLoader",
        "_meta": {
            "title": "Load LoRA"
        }
    },
    "12": {
        "inputs": {
            "block_number": 3,
            "downscale_factor": 2,
            "start_percent": 0,
            "end_percent": 0.5,
            "downscale_after_skip": true,
            "downscale_method": "bicubic",
            "upscale_method": "bicubic",
            "model": [
                "11",
                0
            ]
        },
        "class_type": "PatchModelAddDownscale",
        "_meta": {
            "title": "PatchModelAddDownscale (Kohya Deep Shrink)"
        }
    },
    "16": {
        "inputs": {
            "width": 1024,
            "height": 1024,
            "batch_size": 1
        },
        "class_type": "EmptyLatentImage",
        "_meta": {
            "title": "Empty Latent Image"
        }
    }
}
"""
prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
prompt["10"]["inputs"]["text"] = "masterpiece best quality man"
#set the text prompt for our negative CLIPTextEncode
prompt["7"]["inputs"]["text"] = "worst quailty"
#set seed
prompt["3"]["inputs"]["seed"] = 5345435
ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)
ws.close() # close the socket; if this example runs repeatedly (e.g. inside a Gradio app), leaving connections open eventually causes random connection timeouts
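# If you call this repeatedly, a try/finally guarantees the socket closes even
# when get_images raises; a sketch:
#   ws = websocket.WebSocket()
#   ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
#   try:
#       images = get_images(ws, prompt)
#   finally:
#       ws.close()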
#Display the output images:
from PIL import Image
import io

for node_id in images:
    for image_data in images[node_id]:
        image = Image.open(io.BytesIO(image_data))
        image.show()
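# To save the images to disk instead of displaying them, a minimal sketch
# (the "Fast_{}_{}.png" filename pattern here is just an illustration):
#   for node_id in images:
#       for i, image_data in enumerate(images[node_id]):
#           with open("Fast_{}_{}.png".format(node_id, i), "wb") as f:
#               f.write(image_data)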