XXX: Single thread hordelib dummy run
tazlin committed Aug 27, 2023
1 parent 2d6fc59 commit e3558ec
Showing 44 changed files with 1,105 additions and 826 deletions.
370 changes: 188 additions & 182 deletions hordelib/comfy_horde.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion hordelib/consts.py
@@ -6,7 +6,7 @@
 
 from hordelib.config_path import get_hordelib_path
 
-COMFYUI_VERSION = "84ea21c815d426000c233e0c7b8c542764335cc8"
+COMFYUI_VERSION = "e3d0a9a490195911d0c200f8dbde2991b7421678"
 """The exact version of ComfyUI version to load."""
 
 REMOTE_PROXY = ""
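This pin is consumed at install time: the installer hard-resets the vendored ComfyUI checkout to exactly this revision (see the install_comfy.py diff below). A minimal standalone sketch of that flow, assuming a plain subprocess call in place of hordelib's _run helper:

    import subprocess

    COMFYUI_VERSION = "e3d0a9a490195911d0c200f8dbde2991b7421678"

    def reset_comfyui_to_version(comfyui_path: str, version: str = COMFYUI_VERSION) -> None:
        # Check out the exact pinned commit; a detached HEAD is expected here.
        subprocess.run(["git", "checkout", version], cwd=comfyui_path, check=True)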
123 changes: 80 additions & 43 deletions hordelib/horde.py
@@ -14,6 +14,7 @@
 from PIL import Image
 
 from hordelib.comfy_horde import Comfy_Horde
+from hordelib.consts import MODEL_CATEGORY_NAMES
 from hordelib.shared_model_manager import SharedModelManager
 from hordelib.utils.dynamicprompt import DynamicPromptParser
 from hordelib.utils.image_utils import ImageUtils
@@ -65,6 +66,19 @@ class HordeLib:
         # "<unused>": "PiDiNetPreprocessor",
     }
 
+    CONTROLNET_MODEL_MAP = {
+        "canny": "diff_control_sd15_canny_fp16.safetensors",
+        "hed": "diff_control_sd15_hed_fp16.safetensors",
+        "depth": "diff_control_sd15_depth_fp16.safetensors",
+        "normal": "control_normal_fp16.safetensors",
+        "openpose": "control_openpose_fp16.safetensors",
+        "seg": "control_seg_fp16.safetensors",
+        "scribble": "control_scribble_fp16.safetensors",
+        "fakescribble": "control_scribble_fp16.safetensors",
+        "mlsd": "control_mlsd_fp16.safetensors",
+        "hough": "control_mlsd_fp16.safetensors",
+    }
+
     SOURCE_IMAGE_PROCESSING_OPTIONS = ["img2img", "inpainting", "outpainting"]
 
     SCHEDULERS = ["normal", "karras", "simple", "ddim_uniform"]
@@ -119,6 +133,7 @@ class HordeLib:
         "negative_prompt.text": "negative_prompt",
         "sampler.steps": "ddim_steps",
         "empty_latent_image.batch_size": "n_iter",
+        "model_loader.ckpt_name": "model",
         "model_loader.model_name": "model",
         "image_loader.image": "source_image",
         "loras": "loras",
@@ -225,6 +240,42 @@ def _apply_aihorde_compatibility_hacks(self, payload):
         """
         payload = deepcopy(payload)
 
+        if payload.get("model"):
+            if SharedModelManager.manager.compvis.check_model_available(payload["model"]):
+                model_reference_information = SharedModelManager.manager.compvis.get_model(payload["model"])
+                model_config = model_reference_information.get("config")
+                model_files_config = model_config.get("files")
+
+                for file_config_entry in model_files_config:
+                    path_config_item = file_config_entry.get("path")
+                    if path_config_item:
+                        if path_config_item.endswith((".ckpt", ".safetensors")):
+                            payload["model"] = path_config_item
+                            break
+            else:
+                post_processor_model_managers = SharedModelManager.manager.get_mm_pointers(
+                    [MODEL_CATEGORY_NAMES.codeformer, MODEL_CATEGORY_NAMES.esrgan, MODEL_CATEGORY_NAMES.gfpgan],
+                )
+
+                found_model = False
+
+                for post_processor_model_manager in post_processor_model_managers:
+                    if post_processor_model_manager.check_model_available(payload["model"]):
+                        model_reference_information = post_processor_model_manager.get_model(payload["model"])
+                        model_config = model_reference_information.get("config")
+                        model_files_config = model_config.get("files")
+
+                        for file_config_entry in model_files_config:
+                            path_config_item = file_config_entry.get("path")
+                            if path_config_item:
+                                if path_config_item.endswith((".pth", ".pt", ".safetensors")):
+                                    payload["model"] = path_config_item
+                                    found_model = True
+                                    break
+
+                if not found_model:
+                    raise RuntimeError(f"Model {payload['model']} not found! Is it in a Model Reference?")
+
         # Rather than specify a scheduler, only karras or not karras is specified
         if payload.get("karras", False):
             payload["scheduler"] = "karras"
@@ -373,9 +424,9 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
                     "lora_name": lora["name"],
                     "strength_model": lora["model"],
                     "strength_clip": lora["clip"],
-                    "model_manager": SharedModelManager,
+                    # "model_manager": SharedModelManager,
                 },
-                "class_type": "HordeLoraLoader",
+                "class_type": "LoraLoader",
             }
         else:
             # Subsequent chained loras
@@ -386,9 +437,9 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
                     "lora_name": lora["name"],
                     "strength_model": lora["model"],
                     "strength_clip": lora["clip"],
-                    "model_manager": SharedModelManager,
+                    # "model_manager": SharedModelManager,
                 },
-                "class_type": "HordeLoraLoader",
+                "class_type": "LoraLoader",
             }
 
         for lora_index, lora in enumerate(payload.get("loras")):
@@ -424,7 +475,7 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
            logger.error(f"Parameter {key} not found")
 
         # Inject our model manager
-        pipeline_params["model_loader.model_manager"] = SharedModelManager
+        # pipeline_params["model_loader.model_manager"] = SharedModelManager
 
         # For hires fix, change the image sizes as we create an intermediate image first
         if payload.get("hires_fix", False):
@@ -439,7 +490,11 @@ def _final_pipeline_adjustments(self, payload, pipeline_data):
 
         if payload.get("control_type"):
             # Inject control net model manager
-            pipeline_params["controlnet_model_loader.model_manager"] = SharedModelManager
+            # pipeline_params["controlnet_model_loader.model_manager"] = SharedModelManager
+            model_name = self.CONTROLNET_MODEL_MAP.get(payload.get("control_type"))
+            if not model_name:
+                logger.error(f"Controlnet model for {payload.get('control_type')} not found")
+            pipeline_params["controlnet_model_loader.control_net_name"] = model_name
 
         # Dynamically reconnect nodes in the pipeline to connect the correct pre-processor node
         if payload.get("return_control_map"):
@@ -528,19 +583,6 @@ def _process_results(self, images, rawpng):
         else:
             return results
 
-    def lock_models(self, models):
-        models = [str(x).strip() for x in models if x]
-        # Try to acquire a model lock, if we can't, wait a while as some other thread
-        # must have these resources locked
-        while not self.generator.lock_models(models):
-            time.sleep(0.1)
-        logger.debug(f"Locked models {','.join(models)}")
-
-    def unlock_models(self, models):
-        models = [x.strip() for x in models if x]
-        self.generator.unlock_models(models)
-        logger.debug(f"Unlocked models {','.join(models)}")
-
     def basic_inference(self, payload, rawpng=False):
         # AIHorde hacks to payload
         payload = self._apply_aihorde_compatibility_hacks(payload)
@@ -555,20 +597,17 @@ def basic_inference(self, payload, rawpng=False):
         payload = self._final_pipeline_adjustments(payload, pipeline_data)
         models: list[str] = []
         # Run the pipeline
-        try:
-            # Add prefix to loras to avoid name collisions with other models
-            models = [f"lora-{x['name']}" for x in payload.get("loras", []) if x]
-            # main model
-            models.append(payload.get("model_loader.model_name"))  # type: ignore # FIXME?
-            # controlnet model
-            models.append(payload.get("controlnet_model_loader.control_net_name"))  # type: ignore # FIXME?
-            # Acquire a lock on all these models
-            self.lock_models(models)
-            # Call the inference pipeline
-            # logger.info(payload)
-            images = self.generator.run_image_pipeline(pipeline_data, payload)
-        finally:
-            self.unlock_models(models)
+
+        # Add prefix to loras to avoid name collisions with other models
+        models = [f"lora-{x['name']}" for x in payload.get("loras", []) if x]
+        # main model models.append(payload.get("model_loader.model_name"))  # type: ignore # FIXME?
+        # controlnet model
+        models.append(payload.get("controlnet_model_loader.control_net_name"))  # type: ignore # FIXME?
+
+        # Call the inference pipeline
+        # logger.info(payload)
+        images = self.generator.run_image_pipeline(pipeline_data, payload)
+
         return self._process_results(images, rawpng)
 
     def image_upscale(self, payload, rawpng=False) -> Image.Image | None:
@@ -585,12 +624,11 @@ def image_upscale(self, payload, rawpng=False) -> Image.Image | None:
         pipeline_name = "image_upscale"
         pipeline_data = self.generator.get_pipeline_data(pipeline_name)
         payload = self._final_pipeline_adjustments(payload, pipeline_data)
+
         # Run the pipeline
-        try:
-            self.lock_models([payload.get("model_loader.model_name")])
-            images = self.generator.run_image_pipeline(pipeline_data, payload)
-        finally:
-            self.unlock_models([payload.get("model_loader.model_name")])
+
+        images = self.generator.run_image_pipeline(pipeline_data, payload)
+
         if images is None:
             return None  # XXX Log error and/or raise Exception here
         # Allow arbitrary resizing by shrinking the image back down
@@ -607,10 +645,9 @@ def image_facefix(self, payload, rawpng=False) -> Image.Image | None:
         pipeline_name = "image_facefix"
         pipeline_data = self.generator.get_pipeline_data(pipeline_name)
         payload = self._final_pipeline_adjustments(payload, pipeline_data)
+
         # Run the pipeline
-        try:
-            self.lock_models([payload.get("model_loader.model_name")])
-            images = self.generator.run_image_pipeline(pipeline_data, payload)
-        finally:
-            self.unlock_models([payload.get("model_loader.model_name")])
+
+        images = self.generator.run_image_pipeline(pipeline_data, payload)
+
         return self._process_results(images, rawpng)  # type: ignore # FIXME?
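The new compatibility hack above maps a horde-facing model name to the on-disk filename that ComfyUI's stock loaders expect, by scanning the "files" list in the model reference (checkpoints first, then the codeformer/esrgan/gfpgan post-processor references). A standalone sketch of the same lookup, with an illustrative reference dict standing in for SharedModelManager and a hypothetical helper name:

    CHECKPOINT_EXTENSIONS = (".ckpt", ".safetensors")

    def resolve_model_filename(model_name: str, model_reference: dict) -> str:
        # Mirrors the {"config": {"files": [{"path": ...}]}} layout used in the diff.
        entry = model_reference.get(model_name)
        if entry is None:
            raise RuntimeError(f"Model {model_name} not found! Is it in a Model Reference?")
        for file_entry in entry.get("config", {}).get("files", []):
            path = file_entry.get("path", "")
            if path.endswith(CHECKPOINT_EXTENSIONS):
                return path
        raise RuntimeError(f"No checkpoint file listed for {model_name}")

    # Illustrative data, not a real model-reference entry:
    reference = {"Deliberate": {"config": {"files": [{"path": "deliberate_v2.safetensors"}]}}}
    assert resolve_model_filename("Deliberate", reference) == "deliberate_v2.safetensors"

The CONTROLNET_MODEL_MAP and "class_type": "LoraLoader" changes follow the same pattern: instead of injecting SharedModelManager into custom Horde nodes, the pipeline hands plain filenames to ComfyUI's stock loaders, consistent with dropping the lock_models/unlock_models machinery for the single-threaded dummy run named in the commit title.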
61 changes: 31 additions & 30 deletions hordelib/install_comfy.py
@@ -121,39 +121,40 @@ def reset_comfyui_to_version(cls, comfy_version):
         cls._run(f"git checkout {comfy_version}", get_comfyui_path())
 
     @classmethod
-    def apply_patch(cls, patchfile):
+    def apply_patch(cls, patchfile, skip_dot_patch=True):
         # Don't if we're a release version
         if RELEASE_VERSION:
             return
-        # Check if the patch has already been applied
-        result = cls._run_get_result(
-            f"git apply --check {patchfile}",
-            get_comfyui_path(),
-        )
-        could_apply = not result.returncode
-        result = cls._run_get_result(
-            f"git apply --reverse --check {patchfile}",
-            get_comfyui_path(),
-        )
-        could_reverse = not result.returncode
-        if could_apply:
-            # Apply the patch
-            logger.info(f"Applying patch {patchfile}")
-            result = cls._run_get_result(f"git apply {patchfile}", get_comfyui_path())
-            logger.debug(f"{result}")
-        elif could_reverse:
-            # Patch is already applied, all is well
-            logger.info(f"Already applied patch {patchfile}")
-        else:
-            # Couldn't apply or reverse? That's not so good, but maybe we are partially applied?
-            # Reset local changes
-            cls.remove_local_comfyui_changes()
-            # Try to apply the patch
-            logger.info(f"Applying patch {patchfile}")
-            result = cls._run_get_result(f"git apply {patchfile}", get_comfyui_path())
-            logger.debug(f"{result}")
-            if result.returncode:
-                logger.error(f"Could not apply patch {patchfile}")
+        if not skip_dot_patch:  # FIXME
+            # Check if the patch has already been applied
+            result = cls._run_get_result(
+                f"git apply --check {patchfile}",
+                get_comfyui_path(),
+            )
+            could_apply = not result.returncode
+            result = cls._run_get_result(
+                f"git apply --reverse --check {patchfile}",
+                get_comfyui_path(),
+            )
+            could_reverse = not result.returncode
+            if could_apply:
+                # Apply the patch
+                logger.info(f"Applying patch {patchfile}")
+                result = cls._run_get_result(f"git apply {patchfile}", get_comfyui_path())
+                logger.debug(f"{result}")
+            elif could_reverse:
+                # Patch is already applied, all is well
+                logger.info(f"Already applied patch {patchfile}")
+            else:
+                # Couldn't apply or reverse? That's not so good, but maybe we are partially applied?
+                # Reset local changes
+                cls.remove_local_comfyui_changes()
+                # Try to apply the patch
+                logger.info(f"Applying patch {patchfile}")
+                result = cls._run_get_result(f"git apply {patchfile}", get_comfyui_path())
+                logger.debug(f"{result}")
+                if result.returncode:
+                    logger.error(f"Could not apply patch {patchfile}")
 
         # Drop in custom node config
         config_file = os.path.join(get_comfyui_path(), "extra_model_paths.yaml")
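For reference, the (now gated) patch logic stays idempotent by probing with git apply --check (would the patch apply cleanly?) and git apply --reverse --check (is it already applied?) before touching the tree, resetting local changes only when neither probe succeeds. A condensed sketch of that strategy, assuming plain subprocess calls and a hypothetical reset callback in place of _run_get_result and remove_local_comfyui_changes:

    import subprocess
    from typing import Callable

    def apply_patch_idempotently(repo: str, patchfile: str, reset_repo: Callable[[], None]) -> bool:
        def git_ok(*args: str) -> bool:
            return subprocess.run(["git", *args], cwd=repo).returncode == 0

        if git_ok("apply", "--reverse", "--check", patchfile):
            return True  # Patch already applied; nothing to do.
        if not git_ok("apply", "--check", patchfile):
            # Neither applies nor reverses cleanly: likely partially applied.
            reset_repo()
        return git_ok("apply", patchfile)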
(Diffs for the remaining 40 changed files are not rendered.)
