From 05a1daabc137e3132898330800c530288e7dc7d7 Mon Sep 17 00:00:00 2001 From: Shawn Date: Mon, 19 Aug 2024 14:56:47 -0400 Subject: [PATCH] add more built-in workflows --- src/workflows/index.ts | 6 + src/workflows/sd1.5/img2img.json | 140 +++++++++++ src/workflows/sd1.5/img2img.ts | 211 ++++++++++++++++ src/workflows/sd1.5/txt2img.ts | 2 +- src/workflows/sdxl/txt2img-with-refiner.json | 178 ++++++++++++++ src/workflows/sdxl/txt2img-with-refiner.ts | 239 +++++++++++++++++++ src/workflows/sdxl/txt2img.json | 107 +++++++++ src/workflows/sdxl/txt2img.ts | 164 +++++++++++++ 8 files changed, 1046 insertions(+), 1 deletion(-) create mode 100644 src/workflows/sd1.5/img2img.json create mode 100644 src/workflows/sd1.5/img2img.ts create mode 100644 src/workflows/sdxl/txt2img-with-refiner.json create mode 100644 src/workflows/sdxl/txt2img-with-refiner.ts create mode 100644 src/workflows/sdxl/txt2img.json create mode 100644 src/workflows/sdxl/txt2img.ts diff --git a/src/workflows/index.ts b/src/workflows/index.ts index a94e0f2..11ef991 100644 --- a/src/workflows/index.ts +++ b/src/workflows/index.ts @@ -2,7 +2,10 @@ import config from "../config"; import fluxTxt2img from "../workflows/flux/txt2img"; import fluxImg2img from "../workflows/flux/img2img"; import sd15Txt2img from "../workflows/sd1.5/txt2img"; +import sd15Img2img from "../workflows/sd1.5/img2img"; +import sdxlTxt2img from "../workflows/sdxl/txt2img"; import sdxlImg2img from "../workflows/sdxl/img2img"; +import sdxlTxt2imgWithRefiner from "../workflows/sdxl/txt2img-with-refiner"; import { Workflow } from "../types"; const workflows: { @@ -16,9 +19,12 @@ const workflows: { }, "sd1.5": { txt2img: sd15Txt2img, + img2img: sd15Img2img, }, sdxl: { + txt2img: sdxlTxt2img, img2img: sdxlImg2img, + "txt2img-with-refiner": sdxlTxt2imgWithRefiner, }, }; diff --git a/src/workflows/sd1.5/img2img.json b/src/workflows/sd1.5/img2img.json new file mode 100644 index 0000000..2035c44 --- /dev/null +++ b/src/workflows/sd1.5/img2img.json @@ 
-0,0 +1,140 @@ +{ + "3": { + "inputs": { + "seed": 818335187507771, + "steps": 15, + "cfg": 8, + "sampler_name": "euler", + "scheduler": "normal", + "denoise": 0.8, + "model": [ + "4", + 0 + ], + "positive": [ + "6", + 0 + ], + "negative": [ + "7", + 0 + ], + "latent_image": [ + "12", + 0 + ] + }, + "class_type": "KSampler", + "_meta": { + "title": "KSampler" + } + }, + "4": { + "inputs": { + "ckpt_name": "dreamshaper_8.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint" + } + }, + "6": { + "inputs": { + "text": "A girl in a pink dress with cat ears, magazine photograph", + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "7": { + "inputs": { + "text": "text, watermark", + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "8": { + "inputs": { + "samples": [ + "3", + 0 + ], + "vae": [ + "4", + 2 + ] + }, + "class_type": "VAEDecode", + "_meta": { + "title": "VAE Decode" + } + }, + "9": { + "inputs": { + "filename_prefix": "ComfyUI", + "images": [ + "8", + 0 + ] + }, + "class_type": "SaveImage", + "_meta": { + "title": "Save Image" + } + }, + "10": { + "inputs": { + "image": "example.png", + "upload": "image" + }, + "class_type": "LoadImage", + "_meta": { + "title": "Load Image" + } + }, + "11": { + "inputs": { + "width": 512, + "height": 512, + "interpolation": "nearest", + "method": "keep proportion", + "condition": "always", + "multiple_of": 0, + "image": [ + "10", + 0 + ] + }, + "class_type": "ImageResize+", + "_meta": { + "title": "🔧 Image Resize" + } + }, + "12": { + "inputs": { + "pixels": [ + "11", + 0 + ], + "vae": [ + "4", + 2 + ] + }, + "class_type": "VAEEncode", + "_meta": { + "title": "VAE Encode" + } + } +} \ No newline at end of file diff --git a/src/workflows/sd1.5/img2img.ts b/src/workflows/sd1.5/img2img.ts new file mode 100644 index 0000000..1a1cb9e 
--- /dev/null
+++ b/src/workflows/sd1.5/img2img.ts
@@ -0,0 +1,211 @@
+import { z } from "zod";
+import { ComfyNode, Workflow } from "../../types";
+import config from "../../config";
+
+let checkpoint: any = config.models.checkpoints.enum.optional();
+if (config.warmupCkpt) {
+  checkpoint = checkpoint.default(config.warmupCkpt);
+}
+
+const RequestSchema = z.object({
+  prompt: z.string().describe("The positive prompt for image generation"),
+  negative_prompt: z
+    .string()
+    .optional()
+    .default("text, watermark")
+    .describe("The negative prompt for image generation"),
+  seed: z
+    .number()
+    .int()
+    .optional()
+    .default(() => Math.floor(Math.random() * 1000000000000000))
+    .describe("Seed for random number generation"),
+  steps: z
+    .number()
+    .int()
+    .min(1)
+    .max(100)
+    .optional()
+    .default(15)
+    .describe("Number of sampling steps"),
+  cfg_scale: z
+    .number()
+    .min(0)
+    .max(20)
+    .optional()
+    .default(8)
+    .describe("Classifier-free guidance scale"),
+  sampler_name: z
+    .enum(["euler"])
+    .optional()
+    .default("euler")
+    .describe("Name of the sampler to use"),
+  scheduler: z
+    .enum(["normal"])
+    .optional()
+    .default("normal")
+    .describe("Type of scheduler to use"),
+  denoise: z
+    .number()
+    .min(0)
+    .max(1)
+    .optional()
+    .default(0.8)
+    .describe("Denoising strength"),
+  checkpoint,
+  image: z.string().describe("Input image for img2img"),
+  width: z
+    .number()
+    .int()
+    .min(64)
+    .max(2048)
+    .optional()
+    .default(512)
+    .describe("Width of the generated image"),
+  height: z
+    .number()
+    .int()
+    .min(64)
+    .max(2048)
+    .optional()
+    .default(512)
+    .describe("Height of the generated image"),
+  interpolation: z
+    .enum(["nearest"])
+    .optional()
+    .default("nearest")
+    .describe("Interpolation method for image resizing"),
+  resize_method: z
+    .enum(["keep proportion"])
+    .optional()
+    .default("keep proportion")
+    .describe("Method for resizing the image"),
+  resize_condition: z
+    .enum(["always"])
+    .optional()
+    .default("always")
+    .describe("Condition for when to resize the image"),
+  multiple_of: z
+    .number()
+    .int()
+    .min(0)
+    .optional()
+    .default(0)
+    .describe("Ensure dimensions are multiples of this value"),
+});
+
+type InputType = z.infer<typeof RequestSchema>;
+
+function generateWorkflow(input: InputType): Record<string, ComfyNode> {
+  return {
+    "3": {
+      inputs: {
+        seed: input.seed,
+        steps: input.steps,
+        cfg: input.cfg_scale,
+        sampler_name: input.sampler_name,
+        scheduler: input.scheduler,
+        denoise: input.denoise,
+        model: ["4", 0],
+        positive: ["6", 0],
+        negative: ["7", 0],
+        latent_image: ["12", 0],
+      },
+      class_type: "KSampler",
+      _meta: {
+        title: "KSampler",
+      },
+    },
+    "4": {
+      inputs: {
+        ckpt_name: input.checkpoint,
+      },
+      class_type: "CheckpointLoaderSimple",
+      _meta: {
+        title: "Load Checkpoint",
+      },
+    },
+    "6": {
+      inputs: {
+        text: input.prompt,
+        clip: ["4", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "7": {
+      inputs: {
+        text: input.negative_prompt,
+        clip: ["4", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "8": {
+      inputs: {
+        samples: ["3", 0],
+        vae: ["4", 2],
+      },
+      class_type: "VAEDecode",
+      _meta: {
+        title: "VAE Decode",
+      },
+    },
+    "9": {
+      inputs: {
+        filename_prefix: "ComfyUI",
+        images: ["8", 0],
+      },
+      class_type: "SaveImage",
+      _meta: {
+        title: "Save Image",
+      },
+    },
+    "10": {
+      inputs: {
+        image: input.image,
+        upload: "image",
+      },
+      class_type: "LoadImage",
+      _meta: {
+        title: "Load Image",
+      },
+    },
+    "11": {
+      inputs: {
+        width: input.width,
+        height: input.height,
+        interpolation: input.interpolation,
+        method: input.resize_method,
+        condition: input.resize_condition,
+        multiple_of: input.multiple_of,
+        image: ["10", 0],
+      },
+      class_type: "ImageResize+",
+      _meta: {
+        title: "🔧 Image Resize",
+      },
+    },
+    "12": {
+      inputs: {
+        pixels: ["11", 0],
+        vae: ["4", 2],
+      },
+      class_type: "VAEEncode",
+      _meta: {
+        title: "VAE Encode",
+      },
+    },
+  };
+} + +const workflow: Workflow = { + RequestSchema, + generateWorkflow, +}; + +export default workflow; diff --git a/src/workflows/sd1.5/txt2img.ts b/src/workflows/sd1.5/txt2img.ts index 5d4331b..0679500 100644 --- a/src/workflows/sd1.5/txt2img.ts +++ b/src/workflows/sd1.5/txt2img.ts @@ -33,7 +33,7 @@ const RequestSchema = z.object({ .number() .int() .optional() - .default(712610403220747) + .default(() => Math.floor(Math.random() * 100000000000)) .describe("Seed for random number generation"), steps: z .number() diff --git a/src/workflows/sdxl/txt2img-with-refiner.json b/src/workflows/sdxl/txt2img-with-refiner.json new file mode 100644 index 0000000..c47e562 --- /dev/null +++ b/src/workflows/sdxl/txt2img-with-refiner.json @@ -0,0 +1,178 @@ +{ + "4": { + "inputs": { + "ckpt_name": "sd_xl_base_1.0.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint - BASE" + } + }, + "5": { + "inputs": { + "width": 1024, + "height": 1024, + "batch_size": 1 + }, + "class_type": "EmptyLatentImage", + "_meta": { + "title": "Empty Latent Image" + } + }, + "6": { + "inputs": { + "text": "evening sunset scenery blue sky nature, glass bottle with a galaxy in it", + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "7": { + "inputs": { + "text": "text, watermark", + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "10": { + "inputs": { + "add_noise": "enable", + "noise_seed": 721897303308196, + "steps": 25, + "cfg": 8, + "sampler_name": "euler", + "scheduler": "normal", + "start_at_step": 0, + "end_at_step": 20, + "return_with_leftover_noise": "enable", + "model": [ + "4", + 0 + ], + "positive": [ + "6", + 0 + ], + "negative": [ + "7", + 0 + ], + "latent_image": [ + "5", + 0 + ] + }, + "class_type": "KSamplerAdvanced", + "_meta": { + "title": "KSampler (Advanced) - BASE" + } + }, + "11": { 
+ "inputs": { + "add_noise": "disable", + "noise_seed": 0, + "steps": 25, + "cfg": 8, + "sampler_name": "euler", + "scheduler": "normal", + "start_at_step": 20, + "end_at_step": 10000, + "return_with_leftover_noise": "disable", + "model": [ + "12", + 0 + ], + "positive": [ + "15", + 0 + ], + "negative": [ + "16", + 0 + ], + "latent_image": [ + "10", + 0 + ] + }, + "class_type": "KSamplerAdvanced", + "_meta": { + "title": "KSampler (Advanced) - REFINER" + } + }, + "12": { + "inputs": { + "ckpt_name": "sd_xl_refiner_1.0.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint - REFINER" + } + }, + "15": { + "inputs": { + "text": "evening sunset scenery blue sky nature, glass bottle with a galaxy in it", + "clip": [ + "12", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "16": { + "inputs": { + "text": "text, watermark", + "clip": [ + "12", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "17": { + "inputs": { + "samples": [ + "11", + 0 + ], + "vae": [ + "12", + 2 + ] + }, + "class_type": "VAEDecode", + "_meta": { + "title": "VAE Decode" + } + }, + "19": { + "inputs": { + "filename_prefix": "ComfyUI", + "images": [ + "17", + 0 + ] + }, + "class_type": "SaveImage", + "_meta": { + "title": "Save Image" + } + } +} \ No newline at end of file diff --git a/src/workflows/sdxl/txt2img-with-refiner.ts b/src/workflows/sdxl/txt2img-with-refiner.ts new file mode 100644 index 0000000..1c2eb5f --- /dev/null +++ b/src/workflows/sdxl/txt2img-with-refiner.ts @@ -0,0 +1,239 @@ +import { z } from "zod"; +import { ComfyNode, Workflow } from "../../types"; +import config from "../../config"; + +let checkpoint: any = config.models.checkpoints.enum.optional(); +if (config.warmupCkpt) { + checkpoint = checkpoint.default(config.warmupCkpt); +} + +const RequestSchema = z.object({ + prompt: z.string().describe("The positive 
prompt for image generation"),
+  negative_prompt: z
+    .string()
+    .optional()
+    .default("text, watermark")
+    .describe("The negative prompt for image generation"),
+  width: z
+    .number()
+    .int()
+    .min(256)
+    .max(2048)
+    .optional()
+    .default(1024)
+    .describe("Width of the generated image"),
+  height: z
+    .number()
+    .int()
+    .min(256)
+    .max(2048)
+    .optional()
+    .default(1024)
+    .describe("Height of the generated image"),
+  seed: z
+    .number()
+    .int()
+    .optional()
+    .default(() => Math.floor(Math.random() * 1000000000000000))
+    .describe("Seed for random number generation"),
+  steps: z
+    .number()
+    .int()
+    .min(1)
+    .max(100)
+    .optional()
+    .default(25)
+    .describe("Number of sampling steps"),
+  cfg_scale: z
+    .number()
+    .min(0)
+    .max(20)
+    .optional()
+    .default(8)
+    .describe("Classifier-free guidance scale"),
+  sampler_name: z
+    .enum(["euler"])
+    .optional()
+    .default("euler")
+    .describe("Name of the sampler to use"),
+  scheduler: z
+    .enum(["normal"])
+    .optional()
+    .default("normal")
+    .describe("Type of scheduler to use"),
+  base_start_step: z
+    .number()
+    .int()
+    .min(0)
+    .max(100)
+    .optional()
+    .default(0)
+    .describe("Start step for base model sampling"),
+  base_end_step: z
+    .number()
+    .int()
+    .min(0)
+    .max(100)
+    .optional()
+    .default(20)
+    .describe("End step for base model sampling"),
+  refiner_start_step: z
+    .number()
+    .int()
+    .min(0)
+    .max(100)
+    .optional()
+    .default(20)
+    .describe("Start step for refiner model sampling"),
+  checkpoint,
+  refiner_checkpoint: z
+    .string()
+    .optional()
+    .default("sd_xl_refiner_1.0.safetensors")
+    .describe("Checkpoint for the refiner model"),
+});
+
+type InputType = z.infer<typeof RequestSchema>;
+
+function generateWorkflow(input: InputType): Record<string, ComfyNode> {
+  return {
+    "4": {
+      inputs: {
+        ckpt_name: input.checkpoint,
+      },
+      class_type: "CheckpointLoaderSimple",
+      _meta: {
+        title: "Load Checkpoint - BASE",
+      },
+    },
+    "5": {
+      inputs: {
+        width: input.width,
+        height: input.height,
+        batch_size: 1,
+      },
+      class_type: "EmptyLatentImage",
+      _meta: {
+        title: "Empty Latent Image",
+      },
+    },
+    "6": {
+      inputs: {
+        text: input.prompt,
+        clip: ["4", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "7": {
+      inputs: {
+        text: input.negative_prompt,
+        clip: ["4", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "10": {
+      inputs: {
+        add_noise: "enable",
+        noise_seed: input.seed,
+        steps: input.steps,
+        cfg: input.cfg_scale,
+        sampler_name: input.sampler_name,
+        scheduler: input.scheduler,
+        start_at_step: input.base_start_step,
+        end_at_step: input.base_end_step,
+        return_with_leftover_noise: "enable",
+        model: ["4", 0],
+        positive: ["6", 0],
+        negative: ["7", 0],
+        latent_image: ["5", 0],
+      },
+      class_type: "KSamplerAdvanced",
+      _meta: {
+        title: "KSampler (Advanced) - BASE",
+      },
+    },
+    "11": {
+      inputs: {
+        add_noise: "disable",
+        noise_seed: 0,
+        steps: input.steps,
+        cfg: input.cfg_scale,
+        sampler_name: input.sampler_name,
+        scheduler: input.scheduler,
+        start_at_step: input.refiner_start_step,
+        end_at_step: 10000,
+        return_with_leftover_noise: "disable",
+        model: ["12", 0],
+        positive: ["15", 0],
+        negative: ["16", 0],
+        latent_image: ["10", 0],
+      },
+      class_type: "KSamplerAdvanced",
+      _meta: {
+        title: "KSampler (Advanced) - REFINER",
+      },
+    },
+    "12": {
+      inputs: {
+        ckpt_name: input.refiner_checkpoint,
+      },
+      class_type: "CheckpointLoaderSimple",
+      _meta: {
+        title: "Load Checkpoint - REFINER",
+      },
+    },
+    "15": {
+      inputs: {
+        text: input.prompt,
+        clip: ["12", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "16": {
+      inputs: {
+        text: input.negative_prompt,
+        clip: ["12", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "17": {
+      inputs: {
+        samples: ["11", 0],
+        vae: ["12", 2],
+      },
+      class_type: "VAEDecode",
+      _meta: {
+        title: "VAE Decode",
+      },
+    },
+ "19": { + inputs: { + filename_prefix: "ComfyUI", + images: ["17", 0], + }, + class_type: "SaveImage", + _meta: { + title: "Save Image", + }, + }, + }; +} + +const workflow: Workflow = { + RequestSchema, + generateWorkflow, +}; + +export default workflow; diff --git a/src/workflows/sdxl/txt2img.json b/src/workflows/sdxl/txt2img.json new file mode 100644 index 0000000..80b2bff --- /dev/null +++ b/src/workflows/sdxl/txt2img.json @@ -0,0 +1,107 @@ +{ + "4": { + "inputs": { + "ckpt_name": "sd_xl_base_1.0.safetensors" + }, + "class_type": "CheckpointLoaderSimple", + "_meta": { + "title": "Load Checkpoint - BASE" + } + }, + "5": { + "inputs": { + "width": 1024, + "height": 1024, + "batch_size": 1 + }, + "class_type": "EmptyLatentImage", + "_meta": { + "title": "Empty Latent Image" + } + }, + "6": { + "inputs": { + "text": "evening sunset scenery blue sky nature, glass bottle with a galaxy in it", + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "7": { + "inputs": { + "text": "text, watermark", + "clip": [ + "4", + 1 + ] + }, + "class_type": "CLIPTextEncode", + "_meta": { + "title": "CLIP Text Encode (Prompt)" + } + }, + "17": { + "inputs": { + "samples": [ + "49", + 0 + ], + "vae": [ + "4", + 2 + ] + }, + "class_type": "VAEDecode", + "_meta": { + "title": "VAE Decode" + } + }, + "19": { + "inputs": { + "filename_prefix": "ComfyUI", + "images": [ + "17", + 0 + ] + }, + "class_type": "SaveImage", + "_meta": { + "title": "Save Image" + } + }, + "49": { + "inputs": { + "seed": 0, + "steps": 20, + "cfg": 8, + "sampler_name": "euler", + "scheduler": "normal", + "denoise": 1, + "model": [ + "4", + 0 + ], + "positive": [ + "6", + 0 + ], + "negative": [ + "7", + 0 + ], + "latent_image": [ + "5", + 0 + ] + }, + "class_type": "KSampler", + "_meta": { + "title": "KSampler" + } + } +} \ No newline at end of file diff --git a/src/workflows/sdxl/txt2img.ts b/src/workflows/sdxl/txt2img.ts new file mode 
100644
index 0000000..db3c050
--- /dev/null
+++ b/src/workflows/sdxl/txt2img.ts
@@ -0,0 +1,164 @@
+import { z } from "zod";
+import { ComfyNode, Workflow } from "../../types";
+import config from "../../config";
+
+let checkpoint: any = config.models.checkpoints.enum.optional();
+if (config.warmupCkpt) {
+  checkpoint = checkpoint.default(config.warmupCkpt);
+}
+
+const RequestSchema = z.object({
+  prompt: z.string().describe("The positive prompt for image generation"),
+  negative_prompt: z
+    .string()
+    .optional()
+    .default("text, watermark")
+    .describe("The negative prompt for image generation"),
+  width: z
+    .number()
+    .int()
+    .min(256)
+    .max(2048)
+    .optional()
+    .default(1024)
+    .describe("Width of the generated image"),
+  height: z
+    .number()
+    .int()
+    .min(256)
+    .max(2048)
+    .optional()
+    .default(1024)
+    .describe("Height of the generated image"),
+  seed: z
+    .number()
+    .int()
+    .optional()
+    .default(() => Math.floor(Math.random() * 100000000000))
+    .describe("Seed for random number generation"),
+  steps: z
+    .number()
+    .int()
+    .min(1)
+    .max(100)
+    .optional()
+    .default(20)
+    .describe("Number of sampling steps"),
+  cfg_scale: z
+    .number()
+    .min(0)
+    .max(20)
+    .optional()
+    .default(8)
+    .describe("Classifier-free guidance scale"),
+  sampler_name: z
+    .enum(["euler"])
+    .optional()
+    .default("euler")
+    .describe("Name of the sampler to use"),
+  scheduler: z
+    .enum(["normal"])
+    .optional()
+    .default("normal")
+    .describe("Type of scheduler to use"),
+  denoise: z
+    .number()
+    .min(0)
+    .max(1)
+    .optional()
+    .default(1)
+    .describe("Denoising strength"),
+  checkpoint,
+});
+
+type InputType = z.infer<typeof RequestSchema>;
+
+function generateWorkflow(input: InputType): Record<string, ComfyNode> {
+  return {
+    "4": {
+      inputs: {
+        ckpt_name: input.checkpoint,
+      },
+      class_type: "CheckpointLoaderSimple",
+      _meta: {
+        title: "Load Checkpoint - BASE",
+      },
+    },
+    "5": {
+      inputs: {
+        width: input.width,
+        height: input.height,
+        batch_size: 1,
+      },
+      class_type: "EmptyLatentImage",
+      _meta: {
+        title: "Empty Latent Image",
+      },
+    },
+    "6": {
+      inputs: {
+        text: input.prompt,
+        clip: ["4", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "7": {
+      inputs: {
+        text: input.negative_prompt,
+        clip: ["4", 1],
+      },
+      class_type: "CLIPTextEncode",
+      _meta: {
+        title: "CLIP Text Encode (Prompt)",
+      },
+    },
+    "17": {
+      inputs: {
+        samples: ["49", 0],
+        vae: ["4", 2],
+      },
+      class_type: "VAEDecode",
+      _meta: {
+        title: "VAE Decode",
+      },
+    },
+    "19": {
+      inputs: {
+        filename_prefix: "ComfyUI",
+        images: ["17", 0],
+      },
+      class_type: "SaveImage",
+      _meta: {
+        title: "Save Image",
+      },
+    },
+    "49": {
+      inputs: {
+        seed: input.seed,
+        steps: input.steps,
+        cfg: input.cfg_scale,
+        sampler_name: input.sampler_name,
+        scheduler: input.scheduler,
+        denoise: input.denoise,
+        model: ["4", 0],
+        positive: ["6", 0],
+        negative: ["7", 0],
+        latent_image: ["5", 0],
+      },
+      class_type: "KSampler",
+      _meta: {
+        title: "KSampler",
+      },
+    },
+  };
+}
+
+const workflow: Workflow = {
+  RequestSchema,
+  generateWorkflow,
+};
+
+export default workflow;