{ "id": "14cfe0d4-af51-4a99-b160-53cee3b55d61", "revision": 0, "last_node_id": 19, "last_link_id": 22, "nodes": [ { "id": 8, "type": "VAEDecode", "pos": [ 1209, 188 ], "size": [ 210, 46 ], "flags": {}, "order": 10, "mode": 0, "inputs": [ { "name": "samples", "type": "LATENT", "link": 14 }, { "name": "vae", "type": "VAE", "link": 21 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "slot_index": 0, "links": [ 16 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "VAEDecode" }, "widgets_values": [] }, { "id": 13, "type": "EmptySD3LatentImage", "pos": [ 530, 620 ], "size": [ 315, 106 ], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "LATENT", "type": "LATENT", "slot_index": 0, "links": [ 17 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "EmptySD3LatentImage" }, "widgets_values": [ 1024, 1024, 1 ] }, { "id": 9, "type": "SaveImage", "pos": [ 1454.93896484375, 190.9700164794922 ], "size": [ 976.0567626953125, 1060.9766845703125 ], "flags": {}, "order": 11, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", "link": 16 } ], "outputs": [], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75" }, "widgets_values": [ "ComfyUI" ] }, { "id": 11, "type": "ModelSamplingAuraFlow", "pos": [ 524.8006591796875, 81.51603698730469 ], "size": [ 315, 58 ], "flags": {}, "order": 8, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 22 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "slot_index": 0, "links": [ 13 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "ModelSamplingAuraFlow" }, "widgets_values": [ 6 ] }, { "id": 7, "type": "CLIPTextEncode", "pos": [ 420, 400 ], "size": [ 425.27801513671875, 180.6060791015625 ], "flags": {}, "order": 7, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 20 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 6 ] } ], 
"title": "CLIP Text Encode (Negative Prompt)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "blurry ugly bad" ], "color": "#322", "bgcolor": "#533" }, { "id": 15, "type": "Note", "pos": [ 71.95149993896484, 192.96051025390625 ], "size": [ 319.26513671875, 197.89625549316406 ], "flags": {}, "order": 1, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "The \"You are an assistant... \" text before the actual prompt is the one used in the official example.\n\nThe reason it is exposed to the user like this is because the model still works if you modify or remove it." ], "color": "#432", "bgcolor": "#653" }, { "id": 14, "type": "Note", "pos": [ 860, -50 ], "size": [ 310, 180 ], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "The official way to sample this model is: shift 6 with 36 steps\n\nSampling it with lower steps works but you might have to lower the shift value to reduce the amount of artifacts.\n\nEx: 20 steps with shift 3 seems to not produce artifacts" ], "color": "#432", "bgcolor": "#653" }, { "id": 6, "type": "CLIPTextEncode", "pos": [ 420, 190 ], "size": [ 423.83001708984375, 177.11770629882812 ], "flags": {}, "order": 6, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 19 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "slot_index": 0, "links": [ 4 ] } ], "title": "CLIP Text Encode (Positive Prompt)", "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "You are an assistant designed to generate superior images with the superior degree of image-text alignment based on textual prompts or user prompts. \na cute anime girl with massive fennec ears mouth open and a big fluffy tail long blonde hair and blue eyes wearing a maid outfit with a long black dress and a large purple liquid stained white apron and white gloves and black leggings sitting on a large cushion in the middle of a kitchen in a dark victorian mansion with a stained glass window drinking a glass with a galaxy inside" ], "color": "#232", "bgcolor": "#353" }, { "id": 17, "type": "CLIPLoader", "pos": [ 84.4089852709569, 579.9749737092388 ], "size": [ 270, 106 ], "flags": {}, "order": 3, "mode": 0, "inputs": [], "outputs": [ { "name": "CLIP", "type": "CLIP", "links": [ 19, 20 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "CLIPLoader" }, "widgets_values": [ "gemma_2_2b_fp16.safetensors", "lumina2", "default" ] }, { "id": 18, "type": "VAELoader", "pos": [ 82.24204408227945, 732.7443275110128 ], "size": [ 270, 58 ], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [ { "name": "VAE", "type": "VAE", "links": [ 21 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "VAELoader" }, "widgets_values": [ "ae.safetensors" ] }, { "id": 19, "type": "DFloat11ModelLoader", "pos": [ 68.859432083083, 460.61347106132257 ], "size": [ 281.861328125, 58 ], "flags": {}, "order": 5, "mode": 0, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 22 ] } ], "properties": { "aux_id": "mingyi456/ComfyUI-DFloat11-Extended", "ver": "a4538723928a03ace4c18047668c020dd32feb66", "Node name for S&R": "DFloat11ModelLoader" }, "widgets_values": [ "lumina_2_model_bf16-DF11.safetensors" ] }, { "id": 3, "type": "KSampler", "pos": [ 863, 186 ], "size": [ 315, 262 ], "flags": {}, "order": 9, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 13 }, { "name": "positive", "type": "CONDITIONING", "link": 4 }, { "name": "negative", "type": "CONDITIONING", "link": 6 }, { "name": "latent_image", "type": "LATENT", "link": 17 } ], 
"outputs": [ { "name": "LATENT", "type": "LATENT", "slot_index": 0, "links": [ 14 ] } ], "properties": { "cnr_id": "comfy-core", "ver": "0.3.75", "Node name for S&R": "KSampler" }, "widgets_values": [ 854547257397629, "randomize", 25, 4, "res_multistep", "simple", 1 ] } ], "links": [ [ 4, 6, 0, 3, 1, "CONDITIONING" ], [ 6, 7, 0, 3, 2, "CONDITIONING" ], [ 13, 11, 0, 3, 0, "MODEL" ], [ 14, 3, 0, 8, 0, "LATENT" ], [ 16, 8, 0, 9, 0, "IMAGE" ], [ 17, 13, 0, 3, 3, "LATENT" ], [ 19, 17, 0, 6, 0, "CLIP" ], [ 20, 17, 0, 7, 0, "CLIP" ], [ 21, 18, 0, 8, 1, "VAE" ], [ 22, 19, 0, 11, 0, "MODEL" ] ], "groups": [], "config": {}, "extra": { "ds": { "scale": 1.0152559799477197, "offset": [ 148.54386753733925, 82.03785124693952 ] }, "frontendVersion": "1.32.9", "workflowRendererVersion": "LG" }, "version": 0.4 }